repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
jkokorian/ODMAnalysis
|
odmanalysis/odmstudio/plugins/sourcereaders/csvreading.py
|
1
|
1250
|
from odmanalysis.odmstudio import odmstudio_lib as lib
from odmanalysis.odmstudio import odmstudio_framework as framework
import odmanalysis as odm
import pandas as pd
@framework.RegisterSourceReader("Comma separated", extensions=['csv'], maxNumberOfFiles=1)
class CsvReader(lib.SourceReader):
def __init__(self, dataSource):
lib.SourceReader.__init__(self, dataSource)
def read(self,path):
super(CsvReader, self).read(path)
self._setStatusMessage("reading...")
reader = odm.getODMDataReader(path,chunksize=500)
lineCount = float(sum(1 for line in open(path)))
chunks = []
linesRead = 1
for chunk in reader:
linesRead += 500
self.appendChunkToData(chunk)
self._setStatusMessage("%i lines read" % linesRead)
self._setProgress(linesRead/lineCount * 100)
self._setStatusMessage("File loaded")
self._setProgress(100)
def appendChunkToData(self,chunk):
if self.dataSource.sourceIsEmpty:
self.dataSource.setSourceDataFrame(chunk)
else:
self.dataSource.setSourceDataFrame(pd.concat([self.dataSource.sourceDataFrame,chunk]))
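# --- Hedged usage sketch (editor's addition, not part of ODMAnalysis) ---
# The reader is normally instantiated by the odmstudio framework through the
# RegisterSourceReader decorator above. Driving it by hand only needs an
# object with the three members used in appendChunkToData(); the stub class
# and file name below are hypothetical.
# class _StubSource(object):
#     sourceDataFrame = None
#     @property
#     def sourceIsEmpty(self):
#         return self.sourceDataFrame is None
#     def setSourceDataFrame(self, df):
#         self.sourceDataFrame = df
# CsvReader(_StubSource()).read('measurement.csv')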
|
gpl-3.0
|
fzalkow/scikit-learn
|
benchmarks/bench_glm.py
|
297
|
1493
|
"""
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
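# --- Hedged sketch (editor's addition, not part of the original benchmark) ---
# The same timing pattern with the standard library only, in case
# sklearn.utils.bench.total_seconds is unavailable in newer scikit-learn
# releases. Variable names are new and chosen to avoid the ones used above.
import time
X_demo = np.random.randn(200, 200)
y_demo = np.random.randn(200)
t0 = time.perf_counter()
linear_model.Ridge(alpha=1.).fit(X_demo, y_demo)
print('Ridge fit on a 200x200 problem: %.4f s' % (time.perf_counter() - t0))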
|
bsd-3-clause
|
gticket/scikit-neuralnetwork
|
sknn/tests/test_deep.py
|
5
|
3813
|
import unittest
from nose.tools import (assert_false, assert_raises, assert_true,
assert_equal, assert_in)
import io
import pickle
import numpy
import logging
from sklearn.base import clone
import sknn
from sknn.mlp import Regressor as MLPR
from sknn.mlp import Layer as L
from . import test_linear
class TestDeepNetwork(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(
layers=[
L("Rectifier", units=16),
L("Sigmoid", units=12),
L("Maxout", units=16, pieces=2),
L("Tanh", units=4),
L("Linear")],
n_iter=1)
def test_UnknownLayer(self):
assert_raises(NotImplementedError, L, "Unknown")
def test_UnknownActivation(self):
assert_raises(NotImplementedError, L, "Wrong", units=16)
# This class also runs all the tests from the linear network.
class TestDeepDeterminism(unittest.TestCase):
def setUp(self):
self.a_in = numpy.random.uniform(0.0, 1.0, (8,16))
self.a_out = numpy.zeros((8,1))
def run_EqualityTest(self, copier, asserter):
# Only PyLearn2 supports Maxout.
extra = ["Maxout"] if sknn.backend.name != 'pylearn2' else []
for activation in ["Rectifier", "Sigmoid", "Tanh"] + extra:
nn1 = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
nn1._initialize(self.a_in, self.a_out)
nn2 = copier(nn1, activation)
print('activation', activation)
a_out1 = nn1.predict(self.a_in)
a_out2 = nn2.predict(self.a_in)
print(a_out1, a_out2)
asserter(numpy.all(nn1.predict(self.a_in) == nn2.predict(self.a_in)))
def test_DifferentSeedPredictNotEquals(self):
def ctor(_, activation):
nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=2345)
nn._initialize(self.a_in, self.a_out)
return nn
self.run_EqualityTest(ctor, assert_false)
def test_SameSeedPredictEquals(self):
def ctor(_, activation):
nn = MLPR(layers=[L(activation, units=16, pieces=2), L("Linear", units=1)], random_state=1234)
nn._initialize(self.a_in, self.a_out)
return nn
self.run_EqualityTest(ctor, assert_true)
def test_ClonePredictEquals(self):
def cloner(nn, _):
cc = clone(nn)
cc._initialize(self.a_in, self.a_out)
return cc
self.run_EqualityTest(cloner, assert_true)
def test_SerializedPredictEquals(self):
def serialize(nn, _):
buf = io.BytesIO()
pickle.dump(nn, buf)
buf.seek(0)
return pickle.load(buf)
self.run_EqualityTest(serialize, assert_true)
class TestActivations(unittest.TestCase):
def setUp(self):
self.buf = io.StringIO()
self.hnd = logging.StreamHandler(self.buf)
logging.getLogger('sknn').addHandler(self.hnd)
logging.getLogger().setLevel(logging.WARNING)
def tearDown(self):
assert_equal('', self.buf.getvalue())
sknn.mlp.log.removeHandler(self.hnd)
@unittest.skipIf(sknn.backend.name != 'pylearn2', 'only pylearn2')
def test_MissingParameterException(self):
nn = MLPR(layers=[L("Maxout", units=32), L("Linear")])
a_in = numpy.zeros((8,16))
assert_raises(ValueError, nn._initialize, a_in, a_in)
def test_UnusedParameterWarning(self):
nn = MLPR(layers=[L("Linear", pieces=2)], n_iter=1)
a_in = numpy.zeros((8,16))
nn._initialize(a_in, a_in)
assert_in('Parameter `pieces` is unused', self.buf.getvalue())
self.buf = io.StringIO() # clear
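# --- Hedged usage sketch (editor's addition, not part of scikit-neuralnetwork) ---
# The construction pattern exercised above, shown outside the test harness;
# assumes the sklearn-style fit/predict API that the tests reach via
# _initialize() and predict().
# nn = MLPR(layers=[L("Rectifier", units=16), L("Linear")], n_iter=1)
# nn.fit(numpy.random.uniform(size=(8, 16)), numpy.zeros((8, 1)))
# print(nn.predict(numpy.random.uniform(size=(8, 16))).shape)  # -> (8, 1)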
|
bsd-3-clause
|
alorenzo175/pvlib-python
|
pvlib/test/conftest.py
|
1
|
11023
|
import inspect
import os
import platform
import numpy as np
import pandas as pd
from pkg_resources import parse_version
import pytest
import pvlib
pvlib_base_version = \
parse_version(parse_version(pvlib.__version__).base_version)
# decorator takes one argument: the base version for which it should fail
# for example @fail_on_pvlib_version('0.7') will cause a test to fail
# on pvlib versions 0.7a, 0.7b, 0.7rc1, etc.
# test function may not take args, kwargs, or fixtures.
def fail_on_pvlib_version(version):
# second level of decorator takes the function under consideration
def wrapper(func):
# third level defers computation until the test is called
# this allows the specific test to fail at test runtime,
# rather than at decoration time (when the module is imported)
def inner():
# fail if the version is too high
if pvlib_base_version >= parse_version(version):
pytest.fail('the tested function is scheduled to be '
'removed in %s' % version)
# otherwise return the function to be executed
else:
return func()
return inner
return wrapper
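# --- Hedged usage sketch (editor's addition, not part of pvlib) ---
# The decorator defined above in action; the function name and body are
# placeholders. On pvlib >= 0.7 the wrapped body is replaced by a failure
# with the message built in inner().
@fail_on_pvlib_version('0.7')
def _example_removed_in_0_7():
    pass  # would exercise the API scheduled for removal in 0.7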
# commonly used directories in the tests
test_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
data_dir = os.path.join(test_dir, os.pardir, 'data')
platform_is_windows = platform.system() == 'Windows'
skip_windows = pytest.mark.skipif(platform_is_windows,
reason='does not run on windows')
try:
import scipy
has_scipy = True
except ImportError:
has_scipy = False
requires_scipy = pytest.mark.skipif(not has_scipy, reason='requires scipy')
try:
import tables
has_tables = True
except ImportError:
has_tables = False
requires_tables = pytest.mark.skipif(not has_tables, reason='requires tables')
try:
import ephem
has_ephem = True
except ImportError:
has_ephem = False
requires_ephem = pytest.mark.skipif(not has_ephem, reason='requires ephem')
def pandas_0_17():
return parse_version(pd.__version__) >= parse_version('0.17.0')
needs_pandas_0_17 = pytest.mark.skipif(
not pandas_0_17(), reason='requires pandas 0.17 or greater')
def numpy_1_10():
return parse_version(np.__version__) >= parse_version('1.10.0')
needs_numpy_1_10 = pytest.mark.skipif(
not numpy_1_10(), reason='requires numpy 1.10 or greater')
def pandas_0_22():
return parse_version(pd.__version__) >= parse_version('0.22.0')
needs_pandas_0_22 = pytest.mark.skipif(
not pandas_0_22(), reason='requires pandas 0.22 or greater')
def has_spa_c():
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
return False
else:
return True
requires_spa_c = pytest.mark.skipif(not has_spa_c(), reason="requires spa_c")
def has_numba():
try:
import numba
except ImportError:
return False
else:
vers = numba.__version__.split('.')
if int(vers[0] + vers[1]) < 17:
return False
else:
return True
requires_numba = pytest.mark.skipif(not has_numba(), reason="requires numba")
try:
import siphon
has_siphon = True
except ImportError:
has_siphon = False
requires_siphon = pytest.mark.skipif(not has_siphon,
reason='requires siphon')
try:
import netCDF4 # noqa: F401
has_netCDF4 = True
except ImportError:
has_netCDF4 = False
requires_netCDF4 = pytest.mark.skipif(not has_netCDF4,
reason='requires netCDF4')
try:
import pvfactors # noqa: F401
has_pvfactors = True
except ImportError:
has_pvfactors = False
requires_pvfactors = pytest.mark.skipif(not has_pvfactors,
reason='requires pvfactors')
try:
import PySAM # noqa: F401
has_pysam = True
except ImportError:
has_pysam = False
requires_pysam = pytest.mark.skipif(not has_pysam, reason="requires PySAM")
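# --- Hedged usage sketch (editor's addition, not part of pvlib) ---
# The skipif markers above are applied as decorators inside the test modules;
# the test name below is a placeholder.
# @requires_scipy
# def test_uses_scipy():
#     from scipy import optimize
#     ...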
@pytest.fixture(scope="session")
def sam_data():
data = {}
data['sandiamod'] = pvlib.pvsystem.retrieve_sam('sandiamod')
data['adrinverter'] = pvlib.pvsystem.retrieve_sam('adrinverter')
return data
@pytest.fixture(scope="function")
def pvsyst_module_params():
"""
Define some PVSyst module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'gamma_ref': 1.05,
'mu_gamma': 0.001,
'I_L_ref': 6.0,
'I_o_ref': 5e-9,
'EgRef': 1.121,
'R_sh_ref': 300,
'R_sh_0': 1000,
'R_s': 0.5,
'R_sh_exp': 5.5,
'cells_in_series': 60,
'alpha_sc': 0.001,
}
return parameters
@pytest.fixture(scope='function')
def cec_inverter_parameters():
"""
Define some CEC inverter parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'ABB: MICRO-0.25-I-OUTD-US-208 208V [CEC 2014]',
'Vac': 208.0,
'Paco': 250.0,
'Pdco': 259.5220505,
'Vdco': 40.24260317,
'Pso': 1.771614224,
'C0': -2.48e-5,
'C1': -9.01e-5,
'C2': 6.69e-4,
'C3': -0.0189,
'Pnt': 0.02,
'Vdcmax': 65.0,
'Idcmax': 10.0,
'Mppt_low': 20.0,
'Mppt_high': 50.0,
}
return parameters
@pytest.fixture(scope='function')
def cec_module_params():
"""
Define some CEC module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'Example Module',
'BIPV': 'Y',
'Date': '4/28/2008',
'T_NOCT': 65,
'A_c': 0.67,
'N_s': 18,
'I_sc_ref': 7.5,
'V_oc_ref': 10.4,
'I_mp_ref': 6.6,
'V_mp_ref': 8.4,
'alpha_sc': 0.003,
'beta_oc': -0.04,
'a_ref': 0.473,
'I_L_ref': 7.545,
'I_o_ref': 1.94e-09,
'R_s': 0.094,
'R_sh_ref': 15.72,
'Adjust': 10.6,
'gamma_r': -0.5,
'Version': 'MM105',
'PTC': 48.9,
'Technology': 'Multi-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_cs5p_220m():
"""
Define Canadian Solar CS5P-220M module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'Canadian Solar CS5P-220M',
'BIPV': 'N',
'Date': '10/5/2009',
'T_NOCT': 42.4,
'A_c': 1.7,
'N_s': 96,
'I_sc_ref': 5.1,
'V_oc_ref': 59.4,
'I_mp_ref': 4.69,
'V_mp_ref': 46.9,
'alpha_sc': 0.004539,
'beta_oc': -0.22216,
'a_ref': 2.6373,
'I_L_ref': 5.114,
'I_o_ref': 8.196e-10,
'R_s': 1.065,
'R_sh_ref': 381.68,
'Adjust': 8.7,
'gamma_r': -0.476,
'Version': 'MM106',
'PTC': 200.1,
'Technology': 'Mono-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_spr_e20_327():
"""
Define SunPower SPR-E20-327 module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'SunPower SPR-E20-327',
'BIPV': 'N',
'Date': '1/14/2013',
'T_NOCT': 46,
'A_c': 1.631,
'N_s': 96,
'I_sc_ref': 6.46,
'V_oc_ref': 65.1,
'I_mp_ref': 5.98,
'V_mp_ref': 54.7,
'alpha_sc': 0.004522,
'beta_oc': -0.23176,
'a_ref': 2.6868,
'I_L_ref': 6.468,
'I_o_ref': 1.88e-10,
'R_s': 0.37,
'R_sh_ref': 298.13,
'Adjust': -0.1862,
'gamma_r': -0.386,
'Version': 'NRELv1',
'PTC': 301.4,
'Technology': 'Mono-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_fs_495():
"""
Define First Solar FS-495 module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'First Solar FS-495',
'BIPV': 'N',
'Date': '9/18/2014',
'T_NOCT': 44.6,
'A_c': 0.72,
'N_s': 216,
'I_sc_ref': 1.55,
'V_oc_ref': 86.5,
'I_mp_ref': 1.4,
'V_mp_ref': 67.9,
'alpha_sc': 0.000924,
'beta_oc': -0.22741,
'a_ref': 2.9482,
'I_L_ref': 1.563,
'I_o_ref': 2.64e-13,
'R_s': 6.804,
'R_sh_ref': 806.27,
'Adjust': -10.65,
'gamma_r': -0.264,
'Version': 'NRELv1',
'PTC': 89.7,
'Technology': 'CdTe',
}
return parameters
@pytest.fixture(scope='function')
def sapm_temperature_cs5p_220m():
# SAPM temperature model parameters for Canadian_Solar_CS5P_220M
# (glass/polymer) in open rack
return {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3}
@pytest.fixture(scope='function')
def sapm_module_params():
"""
Define SAPM model parameters for Canadian Solar CS5P 220M module.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {'Material': 'c-Si',
'Cells_in_Series': 96,
'Parallel_Strings': 1,
'A0': 0.928385,
'A1': 0.068093,
'A2': -0.0157738,
'A3': 0.0016606,
'A4': -6.93E-05,
'B0': 1,
'B1': -0.002438,
'B2': 0.0003103,
'B3': -0.00001246,
'B4': 2.11E-07,
'B5': -1.36E-09,
'C0': 1.01284,
'C1': -0.0128398,
'C2': 0.279317,
'C3': -7.24463,
'C4': 0.996446,
'C5': 0.003554,
'C6': 1.15535,
'C7': -0.155353,
'Isco': 5.09115,
'Impo': 4.54629,
'Voco': 59.2608,
'Vmpo': 48.3156,
'Aisc': 0.000397,
'Aimp': 0.000181,
'Bvoco': -0.21696,
'Mbvoc': 0.0,
'Bvmpo': -0.235488,
'Mbvmp': 0.0,
'N': 1.4032,
'IXO': 4.97599,
'IXXO': 3.18803,
'FD': 1}
return parameters
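# --- Hedged usage sketch (editor's addition, not part of pvlib) ---
# Fixtures are requested by parameter name in the test modules and pytest
# injects the returned dict, e.g.:
# def test_example(cec_module_params):
#     assert cec_module_params['Technology'] == 'Multi-c-Si'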
|
bsd-3-clause
|
ctogle/dilapidator
|
test/topology/partitiongraph_tests.py
|
1
|
2592
|
from dilap.geometry.vec3 import vec3
import dilap.geometry.tools as gtl
import dilap.geometry.polymath as pym
import dilap.topology.partitiongraph as ptg
import dilap.worldly.blockletters as dbl
import dilap.core.plotting as dtl
import matplotlib.pyplot as plt
import unittest,random
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_partitiongraph(unittest.TestCase):
def test_av(self):
fp = dbl.block('H',10,25,25)
rg = ptg.partitiongraph()
bs = pym.bsegsxy(fp[0],vec3(0,-100,0),vec3(0,100,0))
for b in bs:
i = rg.av(b = [b,[]],p = vec3(0,0,0).com(b),l = 0)
rg.plotxy()
plt.show()
pg = rg.bgraph()
pgpy = pg.polygon(1,'ccw')
ax = pg.plotxy()
#ax = dtl.plot_axes_xy(20)
#ax = dtl.plot_polygon_full_xy(pgpy,ax,lw = 2)
ax = dtl.plot_polygon_xy(pgpy[0],ax,lw = 2)
for pgp in pgpy[1]:
ax = dtl.plot_polygon_xy(pgp,ax,lw = 2)
plt.show()
def atest_split(self):
fp = dbl.block('H',10,25,25)
rg = ptg.partitiongraph()
bs = pym.bsegsxy(fp[0],vec3(0,-100,0),vec3(0,100,0))
b = fp[0]
i1 = rg.av(b = [b,[]],p = vec3(0,0,0).com(b),l = 0)
i2 = rg.sv(0,bs[0],bs[1])
rg.plotxy()
plt.show()
def atest_break(self):
fp = dbl.block('H',10,25,25)
v1 = {'b':fp,'p':vec3(0,0,0).com(fp[0]),'l':0}
rg = ptg.partitiongraph()
ip = vec3(0.5,0.5,0)
i1 = rg.av(**v1)
i2 = rg.bv(0,ip,vec3(1,0,0))
i3 = rg.bv(0,ip,vec3(0,1,0))
i4 = rg.bv(1,ip,vec3(0,1,0))
rg.vves()
rg.plotxy()
plt.show()
def atest_bgraph(self):
fp = dbl.block('H',10,25,25)
v1 = {'b':fp,'p':vec3(0,0,0).com(fp[0]),'l':0}
rg = ptg.partitiongraph()
ip = vec3(0.5,0.5,0)
i1 = rg.av(**v1)
#i2 = rg.bv(0,ip,vec3(1,0,0))
i3 = rg.bv(0,ip,vec3(0,1,0))
#i4 = rg.bv(1,ip,vec3(0,1,0))
rg.vves()
pg = rg.bgraph()
#pdb.set_trace()
pgpy = pg.polygon(1,'ccw')
ax = pg.plotxy()
#ax = dtl.plot_axes_xy(20)
#ax = dtl.plot_polygon_full_xy(pgpy,ax,lw = 2)
ax = dtl.plot_polygon_xy(pgpy[0],ax,lw = 2)
for pgp in pgpy[1]:
ax = dtl.plot_polygon_xy(pgp,ax,lw = 2)
plt.show()
###############################################################################
if __name__ == '__main__':unittest.main()
###############################################################################
|
mit
|
mathnathan/notebooks
|
model.py
|
1
|
5048
|
import cv2
from numpy import *
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter as gauss
set_printoptions(1)
class Model:
def __init__(self, dim=21, thickness=5, outside=1.0, inside=10.0, calAmp=50,
fluctuations=0.2, noise=0.03, speed=1.0, breadth=3):
self.params = {}
self.params["dim"] = dim
self.params["processThickness"] = thickness
self.params["spikeBreadth"] = breadth
self.params["outsideConcentration"] = outside
self.params["insideConcentration"] = inside
self.params["calciumAmplitude"] = calAmp
self.params["concentrationVariance"] = fluctuations
self.params["signalNoise"] = noise
self.params["speed"] = speed
self.__makeData()
def __getitem__(self, key):
try:
return self.params[key]
except:
raise KeyError
def __setitem__(self, key, value):
try:
self.params[key] = value
except:
raise KeyError
self.__makeData()
def __makeData(self):
time = int(float(self["dim"] - self["spikeBreadth"])/self["speed"])+1
self.data = zeros((time, self["dim"], self["dim"]))
self.fyTrue = zeros((time, self["dim"], self["dim"]))
self.fxTrue = zeros((time-1, self["dim"], self["dim"]))
wall1 = self["dim"]/2 - self["processThickness"]/2
wall2 = wall1 + self["processThickness"]
self.data[:, :, :wall1] = self["outsideConcentration"]
self.data[:, :, wall1:wall2] = self["insideConcentration"]
self.data[:, :, wall2:] = self["outsideConcentration"]
for i,frame in enumerate(self.data):
d = int(i*self["speed"])
frame[d:d+self["spikeBreadth"], wall1:wall2] = self["calciumAmplitude"]
self.fyTrue[i, d:d+self["spikeBreadth"], wall1:wall2] = self["speed"]
self.fyTrue = self.fyTrue[:-1]
self.data += (2*random.random((time, self["dim"], self["dim"]))-1)*self.data*self["concentrationVariance"]
self.data += (2*random.random((time, self["dim"], self["dim"]))-1)*self["signalNoise"]*self["calciumAmplitude"]
def run(self):
self.calcFlow()
self.show()
self.error()
def calcFlow(self, relative=True, blur=(0,0,0), parameters=None):
flowParams = {'pyr_scale':0.5, 'levels':3, 'winsize':7, 'iterations':3, 'poly_n':5,
'poly_sigma':1.1, 'flags':cv2.OPTFLOW_FARNEBACK_GAUSSIAN}
flowParams = parameters if parameters else flowParams
frames, h, w = self.data.shape
self.xflow = ndarray((frames-1,h,w))
self.yflow = ndarray((frames-1,h,w))
data = self.data
if relative:
f0 = percentile(self.data,10,0);
plt.imshow(f0, cmap='gray', interpolation='nearest', vmin=f0.min(), vmax=f0.max())
plt.title("F0"); plt.colorbar()
data = (self.data-f0)/f0
blurData = gauss(data, blur)
prev = self.data[0]
for i,curr in enumerate(blurData[1:]):
flow = cv2.calcOpticalFlowFarneback(prev, curr, **flowParams)
self.xflow[i] = flow[:,:,0]
self.yflow[i] = flow[:,:,1]
prev = curr
def show(self, frames=[0,None], cols=3, parameters=None):
vecParams = {'pivot':'tail', 'angles':'xy', 'scale_units':'xy', 'color':'yellow'}
vecParams = parameters if parameters else vecParams
if type(frames) == int:
plt.figure(figsize=(12,12))
plt.imshow(self.data[frames], cmap='gray')
plt.quiver(self.xflow[frames], self.yflow[frames], **vecParams)
return
else:
vmin = self.data.min()
vmax = self.data.max()
begf, endf = frames
endf = endf if endf else len(self.xflow)
rows = int(ceil((endf-begf)/float(cols)))
fw = 13; fh = float(rows*fw)/cols
plt.figure(figsize=(fw, fh))
for i in range(begf, endf):
plt.subplot(rows,cols,i-begf+1)
plt.imshow(self.data[i], cmap='gray', interpolation='nearest', vmin=vmin, vmax=vmax); plt.colorbar()
plt.title("Flow from frame %d to frame %d" % (i,i+1))
plt.quiver(self.xflow[i], self.yflow[i], **vecParams)
plt.tight_layout()
def error(self):
trueNorms = sqrt(self.fxTrue**2 + self.fyTrue**2)
approxNorms = sqrt(self.xflow**2 + self.yflow**2)
maxErr = trueNorms + approxNorms
maxErr[abs(maxErr) < 1e-12] = 1.0
err = sqrt((self.fxTrue-self.xflow)**2 + (self.fyTrue-self.yflow)**2)/maxErr
print "Maximum Point-wise Error = ", err.max()
print "Minimum Point-wise Error = ", err.min()
frob = linalg.norm(err,'fro',(1,2))
print "Maximum Frobenius Norm = ", frob.max()
print "Minimum Frobenius Norm = ", frob.min()
totErr = average(frob)
print "Total Error = ", totErr
return totErr
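if __name__ == "__main__":
    # Hedged usage sketch (editor's addition): exercise the class above end to
    # end with its defaults. Python 2 / OpenCV 2-era code, matching the print
    # statements in error().
    m = Model()
    m.run()      # calcFlow() + show() + error()
    plt.show()   # display the figures produced by show()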
|
mit
|
puolival/multipy
|
multipy/scripts/fig2.py
|
1
|
1095
|
# -*- coding: utf-8 -*-
"""Script for visualizing the significance decision line in the
Benjamini-Hochberg procedure.
Author: Tuomas Puoliväli
Email: [email protected]
Last modified 12th February 2018
Source: https://github.com/puolival/multipy
"""
# Allow importing modules from parent directory.
import sys
sys.path.append('..')
from data import neuhaus
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
alpha = 0.05 # The chosen critical level.
pvals = neuhaus(permute=False)
n_pvals = len(pvals)
k = np.linspace(1, n_pvals, n_pvals)
sel = np.arange(0, 8)
"""Plot the data."""
sns.set_style('darkgrid')
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(k[sel], pvals[sel], 'o-')
y = (alpha/n_pvals)*k + 0 # Line through the origin.
ax.plot(k[sel], y[sel], '-')
ax.legend(['P-value', 'Decision line'], loc='upper left')
ax.set_xlabel('Hypothesis')
ax.set_ylabel('P-value')
ax.set_title('Benjamini-Hochberg procedure')
ax.set_ylim([-0.01, np.max(pvals[sel])+0.01])
ax.set_xlim([0.5, np.max(sel)+1.5])
fig.tight_layout()
plt.show()
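# --- Hedged sketch (editor's addition, not part of multipy) ---
# The plotted decision line is the Benjamini-Hochberg criterion
# p_(k) <= (k/m) * alpha; the largest k satisfying it fixes the rejection set.
sorted_pvals = np.sort(pvals)
crit = (np.arange(1, n_pvals + 1) / float(n_pvals)) * alpha
below = np.nonzero(sorted_pvals <= crit)[0]
n_reject = below[-1] + 1 if below.size else 0
print('BH rejects the %d smallest p-values at alpha = %.2f' % (n_reject, alpha))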
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/tseries/index.py
|
7
|
78738
|
# pylint: disable=E1101
from __future__ import division
import operator
import warnings
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.base import _shared_docs
from pandas.types.common import (_NS_DTYPE, _INT64_DTYPE,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
is_integer, is_float,
is_integer_dtype,
is_datetime64_ns_dtype,
is_period_dtype,
is_bool_dtype,
is_string_dtype,
is_list_like,
is_scalar,
pandas_dtype,
_ensure_int64)
from pandas.types.generic import ABCSeries
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import isnull
import pandas.types.concat as _concat
from pandas.core.common import (_values_from_object, _maybe_box,
PerformanceWarning)
from pandas.core.index import Index, Int64Index, Float64Index
from pandas.indexes.base import _index_shared_docs
import pandas.compat as compat
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.tseries.base import DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date, to_time
from pandas.tseries.timedeltas import to_timedelta
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
import pandas._join as _join
import pandas.algos as _algos
import pandas.index as _index
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = self._local_timestamps()
if field in ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end',
'is_year_start', 'is_year_end']:
month_kw = (self.freq.kwds.get('startingMonth',
self.freq.kwds.get('month', 12))
if self.freq else 12)
result = tslib.get_start_end_field(values, field, self.freqstr,
month_kw)
elif field in ['weekday_name']:
result = tslib.get_date_name_field(values, field)
return self._maybe_mask_results(result)
elif field in ['is_leap_year']:
# no need to mask NaT
return tslib.get_date_field(values, field)
else:
result = tslib.get_date_field(values, field)
return self._maybe_mask_results(result, convert='float64')
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _dt_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if (isinstance(other, datetime) or
isinstance(other, compat.string_types)):
other = _to_m8(other, tz=self.tz)
result = func(other)
if isnull(other):
result.fill(nat_result)
else:
if isinstance(other, list):
other = DatetimeIndex(other)
elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
other = _ensure_datetime64(other)
result = func(np.asarray(other))
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == tslib.iNaT
else:
o_mask = other.view('i8') == tslib.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
# data are already in UTC
# so need to localize
tz = d.pop('tz', None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',
**kwargs)
_inner_indexer = _join_i8_wrapper(_join.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_join.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_join.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_join.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__', nat_result=True)
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
_engine_type = _index.DatetimeEngine
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'date', 'time', 'microsecond',
'nanosecond', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'tz', 'freq', 'weekday_name',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, ambiguous='raise', dtype=None, **kwargs):
# This allows to later ensure that the 'copy' parameter is honored:
if isinstance(data, Index):
ref_to_data = data._data
else:
ref_to_data = data
if name is None and hasattr(data, 'name'):
name = data.name
dayfirst = kwargs.pop('dayfirst', None)
yearfirst = kwargs.pop('yearfirst', None)
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
# if dtype has an embedded tz, capture it
if dtype is not None:
try:
dtype = DatetimeTZDtype.construct_from_string(dtype)
dtz = getattr(dtype, 'tz', None)
if dtz is not None:
if tz is not None and str(tz) != str(dtz):
raise ValueError("cannot supply both a tz and a dtype"
" with a tz")
tz = dtz
except TypeError:
pass
if data is None:
return cls._generate(start, end, periods, name, freq,
tz=tz, normalize=normalize, closed=closed,
ambiguous=ambiguous)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
elif isinstance(data, ABCSeries):
data = data._values
# data must be Index or np.ndarray here
if not (is_datetime64_dtype(data) or is_datetimetz(data) or
is_integer_dtype(data)):
data = tools.to_datetime(data, dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
elif data.tz is None:
data = data.tz_localize(tz, ambiguous=ambiguous)
else:
# the tz's must match
if str(tz) != str(data.tz):
msg = ('data is already tz-aware {0}, unable to '
'set specified tz: {1}')
raise TypeError(msg.format(data.tz, tz))
subarr = data.values
if freq is None:
freq = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = tslib.cast_to_nanoseconds(data)
else:
subarr = data
else:
# must be integer dtype otherwise
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if data.dtype != _INT64_DTYPE:
data = data.astype(np.int64)
subarr = data.view(_NS_DTYPE)
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tslib.maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
ambiguous=ambiguous)
subarr = subarr.view(_NS_DTYPE)
subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
if dtype is not None:
if not is_dtype_equal(subarr.dtype, dtype):
# dtype must be coerced to DatetimeTZDtype above
if subarr.tz is not None:
raise ValueError("cannot localize from non-UTC data")
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(subarr[0], None, len(subarr), None,
freq, tz=tz, ambiguous=ambiguous)
if not np.array_equal(subarr.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'dates does not conform to passed '
'frequency {1}'
.format(inferred, freq.freqstr))
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, ambiguous='raise', closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = tslib.maybe_get_tz(inferred_tz)
# these may need to be localized
tz = tslib.maybe_get_tz(tz)
if tz is not None:
date = start or end
if date.tzinfo is not None and hasattr(tz, 'localize'):
tz = tz.localize(date.replace(tzinfo=None)).tzinfo
if tz is not None and inferred_tz is not None:
if not tslib.get_timezone(inferred_tz) == tslib.get_timezone(tz):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz, ambiguous=False)
if end is not None and end.tz is None:
end = end.tz_localize(tz, ambiguous=False)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz, ambiguous=False)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz, ambiguous=False)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = tslib.tz_localize_to_utc(_ensure_int64(index), tz,
ambiguous=ambiguous)
index = index.view(_NS_DTYPE)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
index = cls._simple_new(index, name=name, freq=offset, tz=tz)
return index
@property
def _box_func(self):
return lambda x: Timestamp(x, freq=self.offset, tz=self.tz)
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if self._has_same_tz(value):
return _to_m8(value)
raise ValueError('Passed item and index have different timezone')
def _local_timestamps(self):
utc = _utc()
if self.is_monotonic:
return tslib.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
result = tslib.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None,
dtype=None, **kwargs):
"""
we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
"""
if not getattr(values, 'dtype', None):
# empty, but with dtype compat
if values is None:
values = np.empty(0, dtype=_NS_DTYPE)
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs)
values = np.array(values, copy=False)
if is_object_dtype(values):
return cls(values, name=name, freq=freq, tz=tz,
dtype=dtype, **kwargs).values
elif not is_datetime64_dtype(values):
values = _ensure_int64(values).view(_NS_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.offset = freq
result.tz = tslib.maybe_get_tz(tz)
result._reset_identity()
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@cache_readonly
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return tslib.get_timezone(self.tzinfo)
def _has_same_tz(self, other):
zzone = self._timezone
# vzone shouldn't be None if value is non-datetime-like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally and
# therefore should be a `PandasError` but whatever...
raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if (start is None or end is None) and periods is None:
raise TypeError(
'Must either specify period or provide both start and end.')
if offset is None:
# This can't happen with external-facing code, therefore
# PandasError
raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = DatetimeIndex._simple_new(arr)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return tslib.ints_to_pydatetime(self.asi8, self.tz)
@cache_readonly
def _is_dates_only(self):
from pandas.formats.format import _is_dates_only
return _is_dates_only(self.values)
@property
def _formatter_func(self):
from pandas.formats.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
# we use a special reduce here because we need
# to simply set the .tz (and not reinterpret it)
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(DatetimeIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
np.ndarray.__setstate__(data, new_state[2])
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
if other is tslib.NaT:
return self._nat_new(box=True)
raise TypeError("cannot add a datelike to a DatetimeIndex")
def _sub_datelike(self, other):
# subtract a datetime from myself, yielding a TimedeltaIndex
from pandas import TimedeltaIndex
if isinstance(other, DatetimeIndex):
# require tz compat
if not self._has_same_tz(other):
raise TypeError("DatetimeIndex subtraction must have the same "
"timezones or no timezones")
result = self._sub_datelike_dti(other)
elif isinstance(other, (tslib.Timestamp, datetime)):
other = Timestamp(other)
if other is tslib.NaT:
result = self._nat_new(box=False)
# require tz compat
elif not self._has_same_tz(other):
raise TypeError("Timestamp subtraction must have the same "
"timezones or no timezones")
else:
i8 = self.asi8
result = i8 - other.value
result = self._maybe_mask_results(result,
fill_value=tslib.iNaT)
else:
raise TypeError("cannot subtract DatetimeIndex and {typ}"
.format(typ=type(other).__name__))
return TimedeltaIndex(result, name=self.name, copy=False)
def _sub_datelike_dti(self, other):
"""subtraction of two DatetimeIndexes"""
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = self_i8 - other_i8
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = tslib.iNaT
return new_values.view('i8')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
from pandas import TimedeltaIndex
name = self.name
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is Index
name = com._maybe_match_name(self, delta)
elif isinstance(delta, DateOffset):
new_values = self._add_offset(delta).asi8
else:
new_values = self.astype('O') + delta
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
utc = _utc()
if self.tz is not None and self.tz is not utc:
result = result.tz_convert(self.tz)
return result
def _add_offset(self, offset):
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
return result
except NotImplementedError:
warnings.warn("Non-vectorized DateOffset being applied to Series "
"or DatetimeIndex", PerformanceWarning)
return self.astype('O') + offset
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
from pandas.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(self.asi8,
tz=self.tz,
format=format,
na_rep=na_rep)
def to_datetime(self, dayfirst=False):
return self.copy()
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
elif is_datetime64_ns_dtype(dtype):
if self.tz is not None:
return self.tz_convert('UTC').tz_localize(None)
elif copy is True:
return self.copy()
return self
elif is_string_dtype(dtype):
return Index(self.format(), name=self.name, dtype=object)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
def _get_time_micros(self):
utc = _utc()
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return tslib.get_time_micros(values)
def to_series(self, keep_tz=False):
"""
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index
Parameters
----------
keep_tz : optional, defaults False.
return the data keeping the timezone.
If keep_tz is True:
If the timezone is not set, the resulting
Series will have a datetime64[ns] dtype.
Otherwise the Series will have an datetime64[ns, tz] dtype; the
tz will be preserved.
If keep_tz is False:
Series will have a datetime64[ns] dtype. TZ aware
objects will have the tz removed.
Returns
-------
Series
"""
from pandas import Series
return Series(self._to_embed(keep_tz), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
if keep_tz and self.tz is not None:
# preserve the tz & copy
return self.copy(deep=True)
return self.values.copy()
def to_pydatetime(self):
"""
Return DatetimeIndex as object ndarray of datetime.datetime objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
msg = ("You must pass a freq argument as "
"current index has none.")
raise ValueError(msg)
freq = get_period_alias(freq)
return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
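    # Hedged example (editor's addition):
    #   date_range('2016-01-31', periods=2, freq='M').to_period()
    # gives PeriodIndex(['2016-01', '2016-02'], freq='M').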
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def union(self, other):
"""
Specialized union for DatetimeIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = this.tz
if (result.freq is None and
(this.freq is not None or other.freq is not None)):
result.offset = to_offset(result.inferred_freq)
return result
def to_perioddelta(self, freq):
"""
Calculates TimedeltaIndex of difference between index
values and index converted to PeriodIndex at specified
freq. Used for vectorized offsets
.. versionadded:: 0.17.0
Parameters
----------
freq : Period frequency
Returns
-------
y : TimedeltaIndex
"""
return to_timedelta(self.asi8 - self.to_period(freq)
.to_timestamp().asi8)
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this.tz = tz
if this.freq is None:
this.offset = to_offset(this.inferred_freq)
return this
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex) and
self.offset == other.offset and
self._can_fast_union(other)):
joined = self._shallow_copy(joined)
joined.name = name
return joined
else:
tz = getattr(other, 'tz', None)
return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None or offset != other.offset:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
try:
return (right_start == left_end + offset) or right_start in left
except (ValueError):
# if we are comparing an offset that does not propagate timezones
# this will raise
return False
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __iter__(self):
"""
Return an iterator over the boxed values
Returns
-------
Timestamps : ndarray
"""
# convert in chunks of 10k for efficiency
data = self.asi8
l = len(self)
chunksize = 10000
chunks = int(l / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, l)
converted = tslib.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box=True)
for v in converted:
yield v
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
if self.tz != other.tz:
raise ValueError('Passed item and index have different timezone')
return self._simple_new(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = tslib.monthrange(parsed.year, parsed.month)[1]
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23,
59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = tslib.monthrange(parsed.year, qe)[1] # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1),
tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59,
59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(),
tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute,
second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
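    # Hedged note (editor's addition): these bounds back partial string
    # indexing, e.g. the label '2016-01' resolves to the closed interval
    # 2016-01-01 00:00:00 through 2016-01-31 23:59:59.999999 on this index.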
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
if ((reso in ['day', 'hour', 'minute'] and
not (self._resolution < Resolution.get_reso(reso) or
not is_monotonic)) or
(reso == 'second' and
not (self._resolution <= Resolution.RESO_SEC or
not is_monotonic))):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
raise KeyError
if reso == 'microsecond':
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
stamps = self.asi8
if is_monotonic:
# we are out of range
if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
t2.value < stamps[0]) or
((use_rhs and t1.value > stamps[-1] and
t2.value > stamps[-1])))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(
t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(
t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
return self.get_value_maybe_box(series, key)
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
values = self._engine.get_value(_values_from_object(series),
key, tz=self.tz)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if isinstance(key, datetime):
# needed to localize naive datetimes
key = Timestamp(key, tz=self.tz)
return Index.get_loc(self, key, method, tolerance)
if isinstance(key, time):
if method is not None:
raise NotImplementedError('cannot yet lookup inexact labels '
'when key is a time object')
return self.indexer_at_time(key)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timestamp(key, tz=self.tz)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to datetime according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
if is_float(label) or isinstance(label, time) or is_integer(label):
self._invalid_indexer('slice', label)
if isinstance(label, compat.string_types):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
# because label may be passed to searchsorted
            # the bounds need to be swapped if the index is reverse sorted and has a
# length (is_monotonic_decreasing gives True for empty index)
if self.is_monotonic_decreasing and len(self):
return upper if side == 'left' else lower
return lower if side == 'left' else upper
else:
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.second, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if ((start is None or isinstance(start, compat.string_types)) and
(end is None or isinstance(end, compat.string_types))):
mask = True
if start is not None:
start_casted = self._maybe_cast_slice_bound(
start, 'left', kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(
end, 'right', kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
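    # Illustrative sketch (not part of the original source): the two code
    # paths above are typically reached through label-based slicing on a
    # Series with a DatetimeIndex; the snippet assumes top-level pandas names
    # (Series, date_range) and datetime.time.
    #
    #   idx = date_range('2016-01-01', periods=48, freq='H')
    #   s = Series(range(48), index=idx)
    #   s[time(9, 0):time(11, 0)]        # datetime.time bounds -> indexer_between_time
    #   s['2016-01-01':'2016-01-02']     # string bounds -> partial string slicing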
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq,
doc="get/set the frequncy of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. versionadded:: 0.18.1")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
"Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
"Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"Logical indicating if last day of year (defined by frequency)")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"Logical indicating if the date belongs to a leap year")
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
return self._maybe_mask_results(_algos.arrmap_object(
self.asobject.values,
lambda x: np.nan if x is tslib.NaT else x.time()))
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
return self._maybe_mask_results(_algos.arrmap_object(
self.asobject.values, lambda x: x.date()))
def normalize(self):
"""
Return DatetimeIndex with times to midnight. Length is unaltered
Returns
-------
normalized : DatetimeIndex
"""
new_values = tslib.date_normalize(self.asi8, self.tz)
return DatetimeIndex(new_values, freq='infer', name=self.name,
tz=self.tz)
@Substitution(klass='DatetimeIndex', value='key')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, key, side='left', sorter=None):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_NS_DTYPE, copy=False)
else:
key = _to_m8(key, tz=self.tz)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@cache_readonly
def dtype(self):
if self.tz is None:
return _NS_DTYPE
return DatetimeTZDtype('ns', self.tz)
@property
def is_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return tslib.dates_normalized(self.asi8, self.tz)
@cache_readonly
def _resolution(self):
return period.resolution(self.asi8, self.tz)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
freq = None
if isinstance(item, (datetime, np.datetime64)):
self._assert_can_do_op(item)
if not self._has_same_tz(item):
raise ValueError(
'Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item, tz=self.tz)
try:
new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq,
tz=self.tz)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
"""
Make a new DatetimeIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : DatetimeIndex
"""
new_dates = np.delete(self.asi8, loc)
freq = None
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
if self.tz is not None:
new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using
pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Raises
------
TypeError
If DatetimeIndex is tz-naive.
"""
tz = tslib.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._shallow_copy(tz=tz)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
"""
Localize tz-naive DatetimeIndex to given time zone (using
pytz/dateutil), or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
errors : 'raise', 'coerce', default 'raise'
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified timezone (e.g. due to a transition from
or to DST time)
- 'coerce' will return NaT if the timestamp can not be converted
into the specified timezone
.. versionadded:: 0.19.0
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Raises
------
TypeError
If the DatetimeIndex is tz-aware and tz is not None.
"""
if self.tz is not None:
if tz is None:
new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = tslib.maybe_get_tz(tz)
# Convert to UTC
new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
ambiguous=ambiguous,
errors=errors)
new_dates = new_dates.view(_NS_DTYPE)
return self._shallow_copy(new_dates, tz=tz)
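    # Illustrative sketch (not part of the original source): a typical
    # localize/convert round trip on a tz-naive index.
    #
    #   idx = date_range('2016-03-13', periods=4, freq='H')   # tz-naive
    #   idx_utc = idx.tz_localize('UTC')                       # attach a time zone
    #   idx_ny = idx_utc.tz_convert('America/New_York')        # same instants, new wall time
    #   idx_ny.tz_localize(None)                               # drop tz, keep local wall time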
def indexer_at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : TimeSeries
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError("'asof' argument is not supported")
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError("argument 'time' with timezone info is "
"not supported")
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of day (e.g., 9:00-9:30AM).
Return values of the index between two times. If start_time or
        end_time are strings then tseries.tools.to_time is used to convert to
a time object.
Parameters
----------
start_time, end_time : datetime.time, str
datetime.time or string in appropriate format ("%H:%M", "%H%M",
"%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : TimeSeries
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
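    # Illustrative sketch (not part of the original source): positional
    # selection of the 09:00-09:30 window on an intraday index.
    #
    #   idx = date_range('2016-01-04 08:00', periods=120, freq='T')
    #   locs = idx.indexer_between_time('9:00', '9:30')
    #   idx[locs]    # only the timestamps that fall inside the window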
def to_julian_date(self):
"""
Convert DatetimeIndex to Float64Index of Julian Dates.
0 Julian date is noon January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = self.year
month = self.month
day = self.day
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return Float64Index(day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
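    # Worked check (illustrative, not part of the original source): the J2000
    # epoch, 2000-01-01 12:00, should map to Julian date 2451545.0.
    #
    #   DatetimeIndex(['2000-01-01 12:00']).to_julian_date()
    #   # Float64Index([2451545.0], dtype='float64')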
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
if isinstance(offset, Tick):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = b + np.int64(periods) * stride
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = e - np.int64(periods) * stride
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = DatetimeIndex._simple_new(data, None, tz=tz)
else:
if isinstance(start, Timestamp):
start = start.to_pydatetime()
if isinstance(end, Timestamp):
end = end.to_pydatetime()
xdr = generate_range(start=start, end=end,
periods=periods, offset=offset)
dates = list(xdr)
# utc = len(dates) > 0 and dates[0].tzinfo is not None
data = tools.to_datetime(dates)
return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None, **kwargs):
"""
Return a fixed frequency datetime index, with day (calendar) as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
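# Illustrative sketch (not part of the original source): a few typical
# date_range calls; all argument values are hypothetical.
#
#   date_range('2016-01-01', periods=5, freq='D')               # 5 calendar days
#   date_range('2016-01-01', '2016-01-02', freq='6H')           # 6-hourly between both ends
#   date_range('2016-01-01', periods=3, freq='MS', tz='UTC')    # month starts, tz-aware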
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
Return a fixed frequency datetime index, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
**EXPERIMENTAL** Return a fixed frequency datetime index, with
CustomBusinessDay as the default frequency
.. warning:: EXPERIMENTAL
The CustomBusinessDay class is not officially supported and the API is
likely to change in future versions. Use this at your own risk.
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : DatetimeIndex
"""
if freq == 'C':
holidays = kwargs.pop('holidays', [])
weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
freq = CDay(holidays=holidays, weekmask=weekmask)
return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
def _to_m8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key, tz=tz)
return np.int64(tslib.pydt_to_i8(key)).view(_NS_DTYPE)
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
def _naive_in_cache_range(start, end):
if start is None or end is None:
return False
else:
if start.tzinfo is not None or end.tzinfo is not None:
return False
return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
return (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
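# Worked example (illustrative, not part of the original source):
#
#   _time_to_micros(time(9, 30, 15, 250))
#   # = 1000000 * (9*3600 + 30*60 + 15) + 250 = 34215000250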
|
mit
|
cristobaltapia/sajou
|
sajou/model.py
|
1
|
17578
|
#!/usr/bin/env python
# encoding: utf-8
"""
Defines the model classes for 2D and 3D models.
"""
__docformat__ = 'reStructuredText'
import numpy as np
import pandas as pd
import scipy.sparse as sparse
from sajou.materials import Material
from sajou.nodes import Node2D
from sajou.sections import BeamSection
from sajou.elements import (Beam2D, Spring2D)
class Model(object):
"""Defines a model object
Parameters
----------
name: str
name of the model
dimensionality: str
        spatial dimensions used in the model ('2D' or '3D')
Attributes
----------
nodes: dict
dictionary with all the nodes of the system
beams: dict
dictionary with all the beams of the system
beam_sections: dict
dictionary with the beam sections defined in the model
materials: dict
dictionary with the materials defined in the model
n_nodes: int
number of nodes of the system
n_elements: int
number of beams in the system
n_materials: int
number of materials defined
n_dimensions: int
        number of spatial dimensions of the model
n_dof_per_node: int
number of degrees of freedom per node
_name: str
name of the model
_dimensionality: str
        spatial dimensions used in the model
"""
def __new__(cls, name, dimensionality):
if cls is Model:
if dimensionality == '2D':
return super(Model, cls).__new__(Model2D)
if dimensionality == '3D':
return super(Model, cls).__new__(Model3D)
        else:
            # object.__new__ takes no extra arguments; name and dimensionality
            # are handled by __init__ of the concrete class
            return super(Model, cls).__new__(cls)
def __init__(self, name, dimensionality):
self._name = name
self._dimensionality = dimensionality
        # Node Freedom Allocation Table:
self.nfat = dict()
#
self.nodes = dict()
self.elements = dict()
self.beams = dict()
self.beam_sections = dict()
self.materials = dict()
#
self.n_nodes = 0
self.n_elements = 0
self.n_materials = 0
self._K = None # global stiffness matrix
self._P = None # load matrix
self._V = None # global displacement matrix
# Number of dimensions of the model
self.n_dimensions = None
# Number of dof per node. Initialized in the respective models
self.n_dof_per_node = None
# Specify dofs that are not active due to border conditions
self._dof_dirichlet = []
# Node Freedom Allocation Table
self._nfat = dict()
# Node Freedom Map Table:
# Stores the index of the first used DOF of a node in the global
# system.
self._nfmt = dict()
def material(self, name, data, type='isotropic'):
"""Function used to create a Material instance in the model
Parameters
----------
name: str
name of the material
data:
data for the material
type: str
type of the material
Returns
-------
sajou.Material
a Material instance
"""
material = Material(name=name, data=data, type=type)
# Add the material to the dictionary of materials in the current
# model
self.materials[name] = material
self.n_materials += 1
return material
def beam_section(self, name, material, data, type='rectangular'):
"""Function use to create a BeamSection instance in the model
Parameters
----------
name: str
name of the section
material: sajou.Material
material for the section
data:
data (see BeamSection class definition)
type:
type of the section (see BeamSection class definition)
Returns
-------
        sajou.BeamSection
            a BeamSection instance
"""
# The material can be passed both as a string, corresponding to
# a key of the material dictionary of the model, or as a
# material instance directly.
if isinstance(material, str):
material_section = self.materials[material]
else:
material_section = material
section = BeamSection(name=name, material=material_section, data=data,
type=type)
# Add section to the list of beam sections
self.beam_sections[name] = section
return section
def bc(self, node, type='displacement', coord_system='global', **kwargs):
"""Introduces a border condition to the node.
Parameters
----------
node: sajou.Node
Node to which the border condition will be applied
type: str
type of border condition
- Options:
``'displacement'``, ``...``
coord_system:
            specifies the coordinate system to be used when applying the BC
**kwargs:
keyword arguments. At least one of the following parameters must
be supplied
Keyword Arguments
-----------------
v1: float
displacement in the direction 1
v2: float
displacement in the direction 2
v3: float
displacement in the direction 3
r1: float
rotation in the direction 1
r2: float
rotation in the direction 2
r3: float
rotation in the direction 3
Returns
-------
bool
True if successful
"""
        # TODO: currently only in global coordinates. Implement
# transformation in other coordinate systems.
# Get the BC applied
v1 = kwargs.get('v1', None)
v2 = kwargs.get('v2', None)
v3 = kwargs.get('v3', None)
r1 = kwargs.get('r1', None)
r2 = kwargs.get('r2', None)
r3 = kwargs.get('r3', None)
# For the case of the 2D model
if self.n_dof_per_node == 3:
list_dof = [v1, v2, r3]
for dof, curr_bc in enumerate(list_dof):
if curr_bc is not None:
node.set_BC(dof=dof, val=curr_bc)
# For the case of the 3D model
elif self.n_dof_per_node == 6:
list_dof = [v1, v2, v3, r1, r2, r3]
for dof, curr_bc in enumerate(list_dof):
if curr_bc is not None:
node.set_BC(dof=dof, val=curr_bc)
return True
    # TODO: there should be a 'Load' class to handle the different
    # types of loads.
def load(self, node, coord_system='global', **kwargs):
"""Introduces a Load in the given direction according to the selected
coordinate system at the specified node.
Parameters
----------
node: sajou.Node
a Node instance
        coord_system: str
            coordinate system in which the load is defined
**kwargs:
keyword arguments. The BC is defined for the different degree of
freedom (*dof*) available to the node.
At least one of the following parameters must be supplied:
Keyword Arguments
-----------------
f1: float
force in direction 1
f2: float
force in direction 2
f3: float
force in direction 3
m1: float
moment in direction 1
m2: float
moment in direction 2
m3: float
moment in direction 3
Returns
-------
sajou.Load
the instance of the Load object created
"""
        # TODO: currently only in global coordinates. Implement
# transformation in other coordinate systems.
# Get the BC applied
f1 = kwargs.get('f1', None)
f2 = kwargs.get('f2', None)
f3 = kwargs.get('f3', None)
m1 = kwargs.get('m1', None)
m2 = kwargs.get('m2', None)
m3 = kwargs.get('m3', None)
# For the case of the 2D model
if self.n_dof_per_node == 3:
list_dof = [f1, f2, m3]
for dof, curr_force in enumerate(list_dof):
if curr_force is not None:
node.set_Load(dof=dof, val=curr_force)
# For the case of the 3D model
elif self.n_dof_per_node == 6:
list_dof = [f1, f2, f3, m1, m2, m3]
for dof, curr_force in enumerate(list_dof):
if curr_force is not None:
node.set_Load(dof=dof, val=curr_force)
return None
def export_model_data(self):
"""Export all the data of the model. This means the nodes, elements,
border conditions and forces are exported to a ModelData object.
Returns
-------
sajou.model.ModelData
the data of the whole analyzed model
"""
model_data = ModelData(self)
return model_data
def add_hinge(self, node):
"""Add hinge to the specified node. Also supports list of nodes
Parameters
----------
node: sajou.Node
Node instance or list of node instances
Returns
-------
bool
TODO
Todo
----
This function still needs work
"""
#FIXME: not yet implemented!
if isinstance(node, list):
for node_i in node:
node_i.add_hinge()
else:
node.add_hinge()
return True
def __str__(self):
"""
Printable string
"""
return str(
'Model: Name: {name}, Nodes: {n_nodes}, Elements: {n_elements}'.format(
name=self._name, n_nodes=self.n_nodes, n_elements=self.n_elements))
def __repr__(self):
"""
Returns the printable string for this object
"""
return str(
'Model: Name: {name}, Nodes: {n_nodes}, Beams: {n_elements}'.format(
name=self._name, n_nodes=self.n_nodes, n_elements=self.n_elements))
class Model2D(Model):
"""
Subclass of the 'Model' class. It is intended to be used for the 2-dimensional
models of frame structures.
Allocation of DOFs in each node:
[1 2 3] = [ux, uy, rz]
Parameters
----------
name: str
name of the model
Attributes
----------
n_dimensions: int
        number of spatial dimensions (2 for Model2D)
n_dof_per_node: int
number of degrees of freedom per node
"""
def __init__(self, name, dimensionality='2D'):
dimensionality = '2D'
Model.__init__(self, name, dimensionality)
        # Number of dimensions
self.n_dimensions = 2
# Number of degrees of freedom per node:
self.n_dof_per_node = 3
def node(self, x, y):
"""2D implementation of the Node.
Parameters
----------
x: float
x position
y: float
y position
Returns
-------
sajou.Node
the node created
"""
# A coordinate z=0 is passed to initiate the Node Instance
node = Node2D(x=x, y=y, z=0.0, number=self.n_nodes)
self.nodes[node.number] = node
self.n_nodes += 1
return node
def beam(self, node1, node2):
"""Define a beam element between two nodes.
Parameters
----------
node1: sajou.Node
first node
node2: sajou.Node
second node
Returns
-------
sajou.Beam
the beam element created
"""
beam = Beam2D(node1=node1, node2=node2, number=self.n_elements)
self.beams[beam.number] = beam
# add to the element repository of the model
self.elements[beam.number] = beam
# add to the element counter
self.n_elements += 1
return beam
def spring(self, node1, node2, K):
"""Define a spring element between two nodes
Parameters
----------
node1: Node instance
first node
node2: Node instance
second node
K: float
elastic constant of the spring
Returns
-------
sajou.Spring2D:
A Sprind 2D instance
"""
spring = Spring2D(node1=node1, node2=node2, number=self.n_elements)
# assign the elastic constant
spring.assign_elastic_constant(K)
# add to the element repository of the model
self.elements[spring.number] = spring
# add to the element counter
self.n_elements += 1
return spring
def distributed_load(self, elements, **kwargs):
"""Add a distributed load to a list of beam elements.
A list of elements has to be supplied for the first variable. The rest of the
variables are exactly the same as in the 'distributed_load' function of the
corresponding elements.
Parameters
----------
elements: list
list of beam elements
p1: float
value of the force at start node
p2: float
value of the force at end node
direction: int
direction of the applied load (default: *2*)
coord_system: str
coordinate system (default: global)
Returns
-------
bool
TODO
"""
for curr_elem in elements:
# Add distributed load
curr_elem.distributed_load(**kwargs)
return True
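    # Illustrative end-to-end sketch (not part of the original source):
    # assembling a minimal 2D model with the methods above. The material and
    # section data tuples are hypothetical placeholders.
    #
    #   m = Model('simple beam', dimensionality='2D')
    #   n1, n2 = m.node(0., 0.), m.node(2., 0.)
    #   b = m.beam(n1, n2)
    #   m.material('steel', data=(210e9, ), type='isotropic')
    #   m.beam_section('rect', material='steel', data=(0.1, 0.2), type='rectangular')
    #   m.bc(n1, v1=0., v2=0., r3=0.)                  # clamp the left end
    #   m.load(n2, f2=-1000.)                          # point load at the right end
    #   m.distributed_load([b], p1=-500., p2=-500.)    # uniform load on the beam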
class Model3D(Model):
"""
Subclass of the 'Model' class. It is intended to be used for the 3-dimensional
models of frame structures.
Allocation of DOFs in each node:
[1 2 3 4 5 6] = [ux, uy, uz, rx, ry, rz]
"""
def __init__(self, name, dimensionality='3D'):
dimensionality = '3D'
Model.__init__(self, name, dimensionality)
self.n_dof_per_node = 6 # dof per node
def node(self, x, y, z):
"""
3D implementation of the Node.
Parameters
----------
x: float
x-position of the node
y: float
y-position of the node
z: float
z-position of the node
Returns
-------
Node: instance of Node
"""
node = Node(x=x, y=y, z=z, number=self.n_nodes)
self.nodes[node.number] = node
self.n_nodes += 1
return node
def beam(self, node1, node2):
"""Define a line between two nodes.
:node1: first node
:node2: second node
"""
line = Beam3D(node1=node1, node2=node2, number=self.n_elements)
self.beams[line.number] = line
self.n_elements += 1
return line
class ModelData(object):
"""Object to store the data of a model object. It is used to pass it to the results object"""
def __init__(self, model):
"""Initializes the ModelData instance
:model: a Model instance
"""
from copy import copy
self._name = model._name
self._dimensionality = model._dimensionality
self.nodes = copy(model.nodes)
self.beams = copy(model.beams)
self.elements = copy(model.elements)
self.beam_sections = copy(model.beam_sections)
self.materials = copy(model.materials)
self.n_nodes = model.n_nodes
self.n_elements = model.n_elements
self.n_dimensions = model.n_dimensions
self.n_materials = model.n_materials
# Number of dof per node. Initialized in the respective models
self.n_dof_per_node = model.n_dof_per_node
# Specify dofs that are not active due to border conditions
self._dof_dirichlet = copy(model._dof_dirichlet)
def get_dataframe_of_node_coords(model, nodes='all'):
"""Return a pandas dataframe with coordinates of selected nodes of the model
Parameters
----------
nodes: list, str
list of nodes or 'all'
Returns
-------
DataFrame:
DataFrame with the coordinates of the nodes
"""
dimensions = model.n_dimensions
#
if nodes == 'all':
nodes = [i for i, n in model.nodes.items()]
ar_coords = np.zeros((len(nodes), dimensions), dtype=np.float)
index_nodes = np.zeros(len(nodes), dtype=np.int)
for ix_node, curr_node in enumerate(nodes):
node_i = model.nodes[curr_node]
ar_coords[ix_node, :] = node_i.coords
index_nodes[ix_node] = curr_node
# Set coordinate labels according to the model
if dimensions == 2:
index_label = ['x', 'y']
else:
index_label = ['x', 'y', 'z']
    # Assemble the DataFrame
df_coords = pd.DataFrame(data=ar_coords, index=index_nodes,
dtype=np.float64, columns=index_label)
return df_coords
def get_node_coords(model, nodes='all'):
"""Return a dictionary with coordinates of selected nodes of the model
Parameters
----------
nodes: list, str
list of nodes or 'all'
Returns
-------
    dict:
        dictionary mapping each node number to its coordinates
"""
dimensions = model.n_dimensions
#
if nodes == 'all':
nodes = [n for i, n in model.nodes.items()]
# initialize empty dictionary to store the coordinates
dict_coords = dict()
# loop over every node
for node_i in nodes:
dict_coords[node_i.number] = node_i.coords
return dict_coords
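# Illustrative sketch (not part of the original source): exporting node
# coordinates of a model 'm' built with the API above.
#
#   df = get_dataframe_of_node_coords(m, nodes='all')
#   df.head()   # columns 'x', 'y' (and 'z' for 3D models), indexed by node number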
|
mit
|
rudhir-upretee/Sumo_With_Netsim
|
tools/visualization/mpl_dump_onNet.py
|
2
|
17968
|
#!/usr/bin/env python
"""
@file mpl_dump_onNet.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: mpl_dump_onNet.py 12595 2012-08-24 14:07:33Z dkrajzew $
This script reads a network and a dump file and
draws the network, coloring it by the values
found within the dump-file.
matplotlib has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
from matplotlib import rcParams
from pylab import *
import os, string, sys, StringIO
import math
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val/16)] + hex[int(val - int(val/16)*16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0])*16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap)-1):
if colormap[i+1][0]>val:
scale = (val - colormap[i][0]) / (colormap[i+1][0] - colormap[i][0])
r = colormap[i][1][0] + (colormap[i+1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + (colormap[i+1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + (colormap[i+1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
ret = []
defs = mapDef.split(",")
for d in defs:
(value, color) = d.split(":")
r = color[1:3]
g = color[3:5]
b = color[5:7]
ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
return ret
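# Worked example (illustrative, not part of the original script): the default
# map "0:#ff0000,.5:#ffff00,1:#00ff00" parses to
#   [(0.0, (255., 0., 0.)), (0.5, (255., 255., 0.)), (1.0, (0., 255., 0.))]
# and toColor(0.25, ...) then interpolates halfway between red and yellow,
# returning "#ff7f00".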
class NetReader(handler.ContentHandler):
"""Reads a network, storing the edge geometries, lane numbers and max. speeds"""
def __init__(self):
self._id = ''
self._edge2lanes = {}
self._edge2speed = {}
self._edge2shape = {}
self._edge2from = {}
self._edge2to = {}
self._node2x = {}
self._node2y = {}
self._currentShapes = []
self._parseLane = False
def startElement(self, name, attrs):
self._parseLane = False
if name == 'edge':
if not attrs.has_key('function') or attrs['function'] != 'internal':
self._id = attrs['id']
self._edge2from[attrs['id']] = attrs['from']
self._edge2to[attrs['id']] = attrs['to']
self._edge2lanes[attrs['id']] = 0
self._currentShapes = []
else:
self._id = ""
if name == 'lane' and self._id!="":
self._edge2speed[self._id] = float(attrs['speed'])
self._edge2lanes[self._id] = self._edge2lanes[self._id] + 1
self._parseLane = True
self._currentShapes.append(attrs["shape"])
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = attrs['x']
self._node2y[attrs['id']] = attrs['y']
else:
self._id = ""
def endElement(self, name):
if self._parseLane:
self._parseLane = False
if name == 'edge' and self._id!="":
noShapes = len(self._currentShapes)
if noShapes%2 == 1 and noShapes>0:
self._edge2shape[self._id] = self._currentShapes[int(noShapes/2)]
elif noShapes%2 == 0 and len(self._currentShapes[0])!=2:
cshapes = []
minLen = -1
for i in self._currentShapes:
cshape = []
es = i.split(" ")
for e in es:
p = e.split(",")
cshape.append((float(p[0]), float(p[1])))
cshapes.append(cshape)
if minLen==-1 or minLen>len(cshape):
minLen = len(cshape)
self._edge2shape[self._id] = ""
if minLen>2:
for i in range(0, minLen):
x = 0.
y = 0.
for j in range(0, noShapes):
x = x + cshapes[j][i][0]
y = y + cshapes[j][i][1]
x = x / float(noShapes)
y = y / float(noShapes)
if self._edge2shape[self._id] != "":
self._edge2shape[self._id] = self._edge2shape[self._id] + " "
self._edge2shape[self._id] = self._edge2shape[self._id] + str(x) + "," + str(y)
def plotData(self, weights, options, values1, values2, saveName, colorMap):
edge2plotLines = {}
edge2plotColors = {}
edge2plotWidth = {}
xmin = 10000000.
xmax = -10000000.
ymin = 10000000.
ymax = -10000000.
min_width = 0
if options.min_width:
min_width = options.min_width
for edge in self._edge2from:
# compute shape
xs = []
ys = []
if edge not in self._edge2shape or self._edge2shape[edge]=="":
xs.append(float(self._node2x[self._edge2from[edge]]))
xs.append(float(self._node2x[self._edge2to[edge]]))
ys.append(float(self._node2y[self._edge2from[edge]]))
ys.append(float(self._node2y[self._edge2to[edge]]))
else:
shape = self._edge2shape[edge].split(" ")
l = []
for s in shape:
p = s.split(",")
xs.append(float(p[0]))
ys.append(float(p[1]))
for x in xs:
if x<xmin:
xmin = x
if x>xmax:
xmax = x
for y in ys:
if y<ymin:
ymin = y
if y>ymax:
ymax = y
# save shape
edge2plotLines[edge] = (xs, ys)
# compute color
if edge in values2:
c = values2[edge]
else:
c = 0
edge2plotColors[edge] = toColor(c, colorMap)
# compute width
if edge in values1:
w = values1[edge]
if w>0:
w = 10. * math.log(1 + values1[edge]) + min_width
else:
w = min_width
if options.max_width and w>options.max_width:
w = options.max_width
if w<min_width:
w = min_width
edge2plotWidth[edge] = w
else:
edge2plotWidth[edge] = min_width
if options.verbose:
print "x-limits: " + str(xmin) + " - " + str(xmax)
print "y-limits: " + str(ymin) + " - " + str(ymax)
if not options.show:
rcParams['backend'] = 'Agg'
# set figure size
if options.size and not options.show:
            f = figure(figsize=[float(v) for v in options.size.split(",")])
else:
f = figure()
for edge in edge2plotLines:
plot(edge2plotLines[edge][0], edge2plotLines[edge][1], color=edge2plotColors[edge], linewidth=edge2plotWidth[edge])
# set axes
if options.xticks!="":
            (xb, xe, xd, xs) = [float(v) for v in options.xticks.split(",")]
            xticks(arange(xb, xe, xd), size=xs)
if options.yticks!="":
            (yb, ye, yd, ys) = [float(v) for v in options.yticks.split(",")]
            yticks(arange(yb, ye, yd), size=ys)
if options.xlim!="":
(xb, xe) = options.xlim.split(",")
xlim(int(xb), int(xe))
else:
xlim(xmin, xmax)
if options.ylim!="":
(yb, ye) = options.ylim.split(",")
ylim(int(yb), int(ye))
else:
ylim(ymin, ymax)
if saveName:
savefig(saveName);
if options.show:
show()
def plot(self, weights, options, colorMap):
self._minValue1 = weights._minValue1
self._minValue2 = weights._minValue2
self._maxValue1 = weights._maxValue1
self._maxValue2 = weights._maxValue2
if options.join:
self.plotData(weights, options, weights._edge2value1, weights._edge2value2, options.output, colorMap)
else:
for i in weights._intervalBegins:
if options.verbose:
print " Processing step %d..." % i
output = options.output
if output:
output = output.replace("HERE", "%")
output = output % i
self.plotData(weights, options, weights._unaggEdge2value1[i], weights._unaggEdge2value2[i], output, colorMap )
def knowsEdge(self, id):
return id in self._edge2from
class WeightsReader(handler.ContentHandler):
"""Reads the dump file"""
def __init__(self, net, value1, value2):
self._id = ''
self._edge2value2 = {}
self._edge2value1 = {}
self._edge2no1 = {}
self._edge2no2 = {}
self._net = net
self._intervalBegins = []
self._unaggEdge2value2 = {}
self._unaggEdge2value1 = {}
self._beginTime = -1
self._value1 = value1
self._value2 = value2
def startElement(self, name, attrs):
if name == 'interval':
self._beginTime = int(attrs['begin'])
self._intervalBegins.append(self._beginTime)
self._unaggEdge2value2[self._beginTime] = {}
self._unaggEdge2value1[self._beginTime] = {}
if name == 'edge':
if self._net.knowsEdge(attrs['id']):
self._id = attrs['id']
if self._id not in self._edge2value2:
self._edge2value2[self._id] = 0
self._edge2value1[self._id] = 0
self._edge2no1[self._id] = 0
self._edge2no2[self._id] = 0
value1 = self._value1
if attrs.has_key(value1):
value1 = float(attrs[value1])
self._edge2no1[self._id] = self._edge2no1[self._id] + 1
else:
value1 = float(value1)
self._edge2value1[self._id] = self._edge2value1[self._id] + value1
self._unaggEdge2value1[self._beginTime][self._id] = value1
value2 = self._value2
if attrs.has_key(value2):
value2 = float(attrs[value2])
self._edge2no2[self._id] = self._edge2no2[self._id] + 1
else:
value2 = float(value2)
self._edge2value2[self._id] = self._edge2value2[self._id] + value2
self._unaggEdge2value2[self._beginTime][self._id] = value2
def updateExtrema(self, values1ByEdge, values2ByEdge):
for edge in values1ByEdge:
if self._minValue1==-1 or self._minValue1>values1ByEdge[edge]:
self._minValue1 = values1ByEdge[edge]
if self._maxValue1==-1 or self._maxValue1<values1ByEdge[edge]:
self._maxValue1 = values1ByEdge[edge]
if self._minValue2==-1 or self._minValue2>values2ByEdge[edge]:
self._minValue2 = values2ByEdge[edge]
if self._maxValue2==-1 or self._maxValue2<values2ByEdge[edge]:
self._maxValue2 = values2ByEdge[edge]
def valueDependantNorm(self, values, minV, maxV, tendency, percSpeed):
if tendency:
for edge in self._edge2value2:
if values[edge]<0:
values[edge] = 0
else:
values[edge] = 1
elif percSpeed:
for edge in self._edge2value2:
values[edge] = (values[edge] / self._net._edge2speed[edge])
elif minV!=maxV:
for edge in self._edge2value2:
values[edge] = (values[edge] - minV) / (maxV - minV)
def norm(self, tendency, percSpeed):
self._minValue1 = -1
self._maxValue1 = -1
self._minValue2 = -1
self._maxValue2 = -1
# compute mean value if join is set
if options.join:
for edge in self._edge2value2:
if float(self._edge2no1[edge])!=0:
self._edge2value1[edge] = float(self._edge2value1[edge]) / float(self._edge2no1[edge])
else:
self._edge2value1[edge] = float(self._edge2value1[edge])
if float(self._edge2no2[edge])!=0:
self._edge2value2[edge] = float(self._edge2value2[edge]) / float(self._edge2no2[edge])
else:
self._edge2value2[edge] = float(self._edge2value2[edge])
# compute min/max
if options.join:
self.updateExtrema(self._edge2value1, self._edge2value2)
else:
for i in weights._intervalBegins:
self.updateExtrema(self._unaggEdge2value1[i], self._unaggEdge2value2[i])
# norm
if options.verbose:
print "w range: " + str(self._minValue1) + " - " + str(self._maxValue1)
print "c range: " + str(self._minValue2) + " - " + str(self._maxValue2)
if options.join:
self.valueDependantNorm(self._edge2value1, self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._edge2value2, self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
else:
for i in weights._intervalBegins:
self.valueDependantNorm(self._unaggEdge2value1[i], self._minValue1, self._maxValue1, False, percSpeed and self._value1=="speed")
self.valueDependantNorm(self._unaggEdge2value2[i], self._minValue2, self._maxValue2, tendency, percSpeed and self._value2=="speed")
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-n", "--net-file", dest="net",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-d", "--dump", dest="dump",
help="dump file to use", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="(base) name for the output", metavar="FILE")
# data handling
optParser.add_option("-j", "--join", action="store_true", dest="join",
default=False, help="sums up values from all read intervals")
optParser.add_option("-w", "--min-width", dest="min_width",
type="float", help="sets minimum line width")
optParser.add_option("-W", "--max-width", dest="max_width",
type="float", help="sets maximum line width")
optParser.add_option("-c", "--min-color", dest="min_color",
type="float", help="sets minimum color (between 0 and 1)")
optParser.add_option("-C", "--max-color", dest="max_color",
type="float", help="sets maximum color (between 0 and 1)")
optParser.add_option("--tendency-coloring", action="store_true", dest="tendency_coloring",
default=False, help="show only 0/1 color for egative/positive values")
optParser.add_option("--percentage-speed", action="store_true", dest="percentage_speed",
default=False, help="speed is normed to maximum allowed speed on an edge")
optParser.add_option("--values", dest="values",
type="string", default="entered,speed", help="which values shall be parsed")
optParser.add_option("--color-map", dest="colormap",
type="string", default="0:#ff0000,.5:#ffff00,1:#00ff00", help="Defines the color map")
# axes/legend
optParser.add_option("--xticks", dest="xticks",type="string", default="",
help="defines ticks on x-axis")
optParser.add_option("--yticks", dest="yticks",type="string", default="",
help="defines ticks on y-axis")
optParser.add_option("--xlim", dest="xlim",type="string", default="",
help="defines x-axis range")
optParser.add_option("--ylim", dest="ylim",type="string", default="",
help="defines y-axis range")
# output
optParser.add_option("--size", dest="size",type="string", default="",
help="defines the output size")
# processing
optParser.add_option("-s", "--show", action="store_true", dest="show",
default=False, help="shows each plot after generating it")
# parse options
(options, args) = optParser.parse_args()
# check set options
if not options.show and not options.output:
print "Neither show (--show) not write (--output <FILE>)? Exiting..."
exit()
# init color map
colorMap = parseColorMap(options.colormap)
# read network
if options.verbose:
print "Reading net..."
parser = make_parser()
net = NetReader()
parser.setContentHandler(net)
parser.parse(options.net)
# read weights
if options.verbose:
print "Reading weights..."
mValues = options.values.split(",")
weights = WeightsReader(net, mValues[0], mValues[1])
parser.setContentHandler(weights)
parser.parse(options.dump)
# process
if options.verbose:
print "Norming weights..."
weights.norm(options.tendency_coloring, options.percentage_speed)
if options.verbose:
print "Plotting..."
net.plot(weights, options, colorMap)
|
gpl-3.0
|
Abstrakten/webIntCourse
|
social-networks/find-communities.py
|
1
|
2172
|
import networkx as nx
from numpy.linalg import eig
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import happyfuntokenizer
color_map = {
0:'y',
1:'r',
2:'b',
3:'k',
4:'m',
5:'c',
6:'g',
7:'yellow',
8:'brown',
}
# Laplacian method
def find_communities(G):
L = nx.laplacian_matrix(G).todense()
eig_values, eig_matrix = eig(L)
# find second minimum in eigen values
second_min_idx, second_min = sorted(enumerate(eig_values), key=lambda x: x[1])[1]
print(second_min)
print(second_min_idx)
    # find the corresponding eigenvector (the column for the second-smallest eigenvalue)
    target_coloumn = eig_matrix.transpose()[second_min_idx].transpose()
    # keep only the real part; the Laplacian is symmetric, so any imaginary
    # component is numerical noise
    target_coloumn = target_coloumn.real
print(target_coloumn.shape)
# pair graph node with eigen vector value
paired_target_coloumn = zip(G.nodes(), target_coloumn)
# for x,y in (paired_target_coloumn):
# print(x, y)
k = 4
# cluster using knn
km = KMeans(n_clusters=k, init='k-means++', max_iter=100, n_init=1)
km.fit(target_coloumn)
communities = {}
for node, cluster in zip(G.nodes(), km.labels_):
#print(color_map[cluster])
G.node[node]['cluster'] = cluster
lst = communities.get(cluster, [])
lst.append(node)
communities[cluster] = lst
#print(cluster)
return communities
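# Illustrative sketch (not part of the original script): the same spectral
# partitioning applied to a small built-in graph (k is fixed to 4 inside
# find_communities).
#
#   G_demo = nx.karate_club_graph()
#   print(find_communities(G_demo))   # dict: cluster label -> list of node ids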
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
with open("friendships.txt") as inFile:
lines = inFile.read().split("\n")
data_rows = [x for x in chunks(lines, 5)]
# populate graph
G = nx.Graph()
for row in data_rows:
name = row[0].split()[1]
friends = row[1].split("\t")[1:]
# summary
summary = row[2]
# review
review = row[3]
sentences = review.split(".")
for friend in friends:
G.add_edge(name, friend)
communities = find_communities(G)
print(communities)
nx.draw_spring(G, node_color=[color_map[G.node[node]['cluster']] for node in G])
plt.show()
|
gpl-3.0
|
mojoboss/scikit-learn
|
examples/plot_isotonic_regression.py
|
303
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
|
bsd-3-clause
|
jhnnsnk/nest-simulator
|
pynest/examples/mc_neuron.py
|
12
|
7554
|
# -*- coding: utf-8 -*-
#
# mc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Multi-compartment neuron example
--------------------------------
Simple example of how to use the three-compartment ``iaf_cond_alpha_mc``
neuron model.
Three stimulation paradigms are illustrated:
- externally applied current, one compartment at a time
- spikes impinging on each compartment, one at a time
- rheobase current injected to soma causing output spikes
Voltage and synaptic conductance traces are shown for all compartments.
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
##############################################################################
# We then extract the receptor types and the list of recordable quantities
# from the neuron model. Receptor types and recordable quantities uniquely
# define the receptor type and the compartment while establishing synaptic
# connections or assigning multimeters.
syns = nest.GetDefaults('iaf_cond_alpha_mc')['receptor_types']
print("iaf_cond_alpha_mc receptor_types: {0}".format(syns))
rqs = nest.GetDefaults('iaf_cond_alpha_mc')['recordables']
print("iaf_cond_alpha_mc recordables : {0}".format(rqs))
###############################################################################
# The simulation parameters are assigned to variables.
nest.SetDefaults('iaf_cond_alpha_mc',
{'V_th': -60.0, # threshold potential
'V_reset': -65.0, # reset potential
't_ref': 10.0, # refractory period
'g_sp': 5.0, # somato-proximal coupling conductance
'soma': {'g_L': 12.0}, # somatic leak conductance
# proximal excitatory and inhibitory synaptic time constants
'proximal': {'tau_syn_ex': 1.0,
'tau_syn_in': 5.0},
'distal': {'C_m': 90.0} # distal capacitance
})
###############################################################################
# The nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
n = nest.Create('iaf_cond_alpha_mc')
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create('multimeter', params={'record_from': rqs, 'interval': 0.1})
nest.Connect(mm, n)
###############################################################################
# We create one current generator per compartment and configure a stimulus
# regime that drives the distal and proximal dendrites and the soma, in that order.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current.
cgs = nest.Create('dc_generator', 3)
cgs[0].set(start=250.0, stop=300.0, amplitude=50.0) # soma
cgs[1].set(start=150.0, stop=200.0, amplitude=-50.0) # proxim.
cgs[2].set(start=50.0, stop=100.0, amplitude=100.0) # distal
###############################################################################
# Generators are then connected to the correct compartments. Specification of
# the ``receptor_type`` uniquely defines the target compartment and receptor.
nest.Connect(cgs[0], n, syn_spec={'receptor_type': syns['soma_curr']})
nest.Connect(cgs[1], n, syn_spec={'receptor_type': syns['proximal_curr']})
nest.Connect(cgs[2], n, syn_spec={'receptor_type': syns['distal_curr']})
###############################################################################
# We create one excitatory and one inhibitory spike generator per compartment
# and configure a regime that drives the distal and proximal dendrites and the
# soma, in that order, alternating excitatory and inhibitory spike generators.
sgs = nest.Create('spike_generator', 6)
sgs[0].spike_times = [600.0, 620.0] # soma excitatory
sgs[1].spike_times = [610.0, 630.0] # soma inhibitory
sgs[2].spike_times = [500.0, 520.0] # proximal excitatory
sgs[3].spike_times = [510.0, 530.0] # proximal inhibitory
sgs[4].spike_times = [400.0, 420.0] # distal excitatory
sgs[5].spike_times = [410.0, 430.0] # distal inhibitory
###############################################################################
# Connect the spike generators to the correct compartments in the same way
# as for the current generators.
nest.Connect(sgs[0], n, syn_spec={'receptor_type': syns['soma_exc']})
nest.Connect(sgs[1], n, syn_spec={'receptor_type': syns['soma_inh']})
nest.Connect(sgs[2], n, syn_spec={'receptor_type': syns['proximal_exc']})
nest.Connect(sgs[3], n, syn_spec={'receptor_type': syns['proximal_inh']})
nest.Connect(sgs[4], n, syn_spec={'receptor_type': syns['distal_exc']})
nest.Connect(sgs[5], n, syn_spec={'receptor_type': syns['distal_inh']})
###############################################################################
# Run the simulation for 700 ms.
nest.Simulate(700)
###############################################################################
# Now we set the intrinsic current of the soma to 150 pA to make the neuron spike.
n.set({'soma': {'I_e': 150.}})
###############################################################################
# We simulate the network for another 300 ms and retrieve recorded data from
# the multimeter
nest.Simulate(300)
rec = mm.events
###############################################################################
# We create an array with the time points when the quantities were actually
# recorded
t = rec['times']
###############################################################################
# We plot the time traces of the membrane potential for the soma and the
# proximal and distal dendrites (`V_m.s`, `V_m.p` and `V_m.d`).
plt.figure()
plt.subplot(211)
plt.plot(t, rec['V_m.s'], t, rec['V_m.p'], t, rec['V_m.d'])
plt.legend(('Soma', 'Proximal dendrite', 'Distal dendrite'),
loc='lower right')
plt.axis([0, 1000, -76, -59])
plt.ylabel('Membrane potential [mV]')
plt.title('Responses of iaf_cond_alpha_mc neuron')
###############################################################################
# Finally, we plot the time traces of the synaptic conductance measured in
# each compartment.
plt.subplot(212)
plt.plot(t, rec['g_ex.s'], 'b-', t, rec['g_ex.p'], 'g-',
t, rec['g_ex.d'], 'r-')
plt.plot(t, rec['g_in.s'], 'b--', t, rec['g_in.p'], 'g--',
t, rec['g_in.d'], 'r--')
plt.legend(('g_ex.s', 'g_ex.p', 'g_ex.d', 'g_in.s', 'g_in.p', 'g_in.d'))
plt.axis([350, 700, 0, 1.15])
plt.xlabel('Time [ms]')
plt.ylabel('Synaptic conductance [nS]')
plt.show()
|
gpl-2.0
|
Mctigger/KagglePlanetPytorch
|
nn_finetune_resnet_50.py
|
1
|
4493
|
import os
import sys
from itertools import chain
import numpy as np
import torchvision.models
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
import torch.nn.init
from torch.utils.data import DataLoader
from torchsample.modules import ModuleTrainer
from torchsample.callbacks import CSVLogger, LearningRateScheduler
import sklearn.model_selection
import paths
import labels
import transforms
import callbacks
from datasets import KaggleAmazonJPGDataset
name = os.path.basename(sys.argv[0])[:-3]
def generate_model():
class MyModel(nn.Module):
def __init__(self, pretrained_model):
super(MyModel, self).__init__()
self.pretrained_model = pretrained_model
self.layer1 = pretrained_model.layer1
self.layer2 = pretrained_model.layer2
self.layer3 = pretrained_model.layer3
self.layer4 = pretrained_model.layer4
pretrained_model.avgpool = nn.AvgPool2d(8)
classifier = [
nn.Linear(pretrained_model.fc.in_features, 17),
]
self.classifier = nn.Sequential(*classifier)
pretrained_model.fc = self.classifier
def forward(self, x):
return F.sigmoid(self.pretrained_model(x))
return MyModel(torchvision.models.resnet50(pretrained=True))
random_state = 1
labels_df = labels.get_labels_df()
kf = sklearn.model_selection.KFold(n_splits=5, shuffle=True, random_state=random_state)
split = kf.split(labels_df)
def train_net(train, val, model, name):
transformations_train = transforms.apply_chain([
transforms.random_fliplr(),
transforms.random_flipud(),
transforms.augment(),
torchvision.transforms.ToTensor()
])
transformations_val = transforms.apply_chain([
torchvision.transforms.ToTensor(),
])
dset_train = KaggleAmazonJPGDataset(train, paths.train_jpg, transformations_train, divide=False)
train_loader = DataLoader(dset_train,
batch_size=64,
shuffle=True,
num_workers=10,
pin_memory=True)
dset_val = KaggleAmazonJPGDataset(val, paths.train_jpg, transformations_val, divide=False)
val_loader = DataLoader(dset_val,
batch_size=64,
num_workers=10,
pin_memory=True)
ignored_params = list(map(id, chain(
model.classifier.parameters(),
model.layer1.parameters(),
model.layer2.parameters(),
model.layer3.parameters(),
model.layer4.parameters()
)))
base_params = filter(lambda p: id(p) not in ignored_params,
model.parameters())
optimizer = optim.Adam([
{'params': base_params},
{'params': model.layer1.parameters()},
{'params': model.layer2.parameters()},
{'params': model.layer3.parameters()},
{'params': model.layer4.parameters()},
{'params': model.classifier.parameters()}
], lr=0, weight_decay=0.0005)
trainer = ModuleTrainer(model)
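    # discriminative fine-tuning: the optimizer's param groups are ordered
    # [stem, layer1..layer4, classifier]; the schedule below gives the new classifier
    # the full learning rate and, from epoch 1 on, assigns progressively smaller
    # fractions to layer4 ... layer1 and the stem (0.4, 0.2, 0.1, 0.05, 0.01 of lr).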
def schedule(current_epoch, current_lrs, **logs):
lrs = [1e-3, 1e-4, 1e-5]
epochs = [0, 2, 10]
for lr, epoch in zip(lrs, epochs):
if current_epoch >= epoch:
current_lrs[5] = lr
if current_epoch >= 1:
current_lrs[4] = lr * 0.4
current_lrs[3] = lr * 0.2
current_lrs[2] = lr * 0.1
current_lrs[1] = lr * 0.05
current_lrs[0] = lr * 0.01
return current_lrs
trainer.set_callbacks([
callbacks.ModelCheckpoint(
paths.models,
name,
save_best_only=False,
saving_strategy=lambda epoch: True
),
CSVLogger('./logs/' + name),
LearningRateScheduler(schedule)
])
trainer.compile(loss=nn.BCELoss(),
optimizer=optimizer)
trainer.fit_loader(train_loader,
val_loader,
nb_epoch=35,
verbose=1,
cuda_device=0)
if __name__ == "__main__":
for i, (train_idx, val_idx) in enumerate(split):
name = os.path.basename(sys.argv[0])[:-3] + '-split_' + str(i)
train_net(labels_df.ix[train_idx], labels_df.ix[val_idx], generate_model(), name)
|
mit
|
BrownDwarf/Starfish
|
attic/cheb.py
|
2
|
2152
|
import matplotlib.pyplot as plt
from numpy.polynomial import Chebyshev as Ch
import numpy as np
import model as m
def test_chebyshev():
'''Domain controls the x-range, while window controls the y-range.'''
coef = np.array([0., 1.])
#coef2 = np.array([0.,0.,1,-1,0])
myCh = Ch(coef, window=[-10, 10])
#Domain c
#myCh2 = Ch(coef2)
#xs = np.linspace(0,3.)
x0 = np.linspace(-1, 1)
plt.plot(x0, myCh(x0))
#plt.plot(x0, myCh2(x0))
#plt.plot(xs, myCh2(xs))
plt.show()
#test_chebyshev()
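# Illustrative aside (not part of the original script): a minimal sketch of the
# domain/window mapping described in the docstring above. The helper name below is
# hypothetical and only assumes the identity coefficients [0, 1]; Chebyshev maps x
# linearly from ``domain`` onto the default window [-1, 1] before evaluating.
def _demo_domain_window():
    ch = Ch([0, 1], domain=[0, 2298])
    # the domain endpoints land on the window endpoints, the midpoint on 0
    print(ch(0.0), ch(1149.0), ch(2298.0))  # -1.0  0.0  1.0
# _demo_domain_window()  # uncomment to run the check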
xs = np.arange(2299)
T0 = np.ones_like(xs)
Ch1 = Ch([0,1], domain=[0,2298])
T1 = Ch1(xs)
Ch2 = Ch([0,0,1],domain=[0,2298])
T2 = Ch2(xs)
Ch3 = Ch([0,0,0,1],domain=[0,2298])
T3 = Ch3(xs)
T = np.array([T0,T1,T2,T3]) #multiply this by the flux and sigma vector for each order
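# TT[i, j, n] = T_i(x_n) * T_j(x_n): pairwise products of the Chebyshev basis
# vectors at every pixel, used to build the quadratic-form matrix A below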
TT = np.einsum("in,jn->ijn",T,T)
c = np.array([1,0,0,0])
orders = [21,22]
wls = m.wls
fls = m.fls
sigmas = m.sigmas #has shape (51, 2299), a sigma array for each order
fmods = m.model(wls, 6001,3.5,0.0, 3, 0.0, 1e-10)
#fls = m.model(wls,6020,3.6,40,2e-27)
TT = np.einsum("in,jn->ijn",T,T)
mu = np.array([1,0,0,0])
sigmac = 0.2
D = sigmac**(-2) * np.eye(4)
Dmu = np.einsum("ij,j->j",D,mu)
muDmu = np.einsum("j,j->",mu,Dmu)
a= fmods**2/sigmas**2
A = np.einsum("in,jkn->ijk",a,TT)
#add in prior
A = A + D
detA = np.array(list(map(np.linalg.det, A)))
invA = np.array(list(map(np.linalg.inv, A)))
b = fmods * fls / sigmas**2
B = np.einsum("in,jn->ij",b,T)
B = B + Dmu
g = -0.5 * fls**2/sigmas**2
G = np.einsum("ij->i",g)
G = G - 0.5 * muDmu
#A,B,G are correct
invAB = np.einsum("ijk,ik->ij",invA,B)
BAB = np.einsum("ij,ij->i",B,invAB)
#these are now correct
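# analytic marginalization over the Chebyshev coefficients c: for the quadratic
# form -0.5*c^T A c + B^T c + G, the marginal log-probability per order is
# 0.5*log((2*pi)^k / det A) + 0.5*B^T A^{-1} B + G, summed over orders below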
lnp = 0.5 * np.log((2. * np.pi)**len(orders)/detA) + 0.5 * BAB + G
#print(lnp)
print("Marginalized",np.sum(lnp))
#print(m.lnprob(np.array([6000, 3.5, 42, 0, 2.1e-27, 0.0, 0.0])))
Ac = np.einsum("ijk,k->ij",A,c)
cAc = np.einsum("j,ij->i",c,Ac)
Bc = np.einsum("ij,j->i",B,c)
#to obtain the original, unnormalized, unmarginalized P given c
print("Unmarginalized",np.sum(-0.5 * cAc + Bc + G))
#plt.plot(xs,T0)
#plt.plot(xs,T1)
#plt.plot(xs,T2)
#plt.plot(xs,T3)
#plt.show()
|
bsd-3-clause
|
gavinmh/keras
|
tests/manual/check_callbacks.py
|
82
|
7540
|
import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
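# combine_imgs tiles a stack of (n_imgs, H, W) feature maps into a single
# grid[0] x grid[1] mosaic image for display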
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
        raise ValueError('number of images does not match the grid size')
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
|
mit
|
hsuantien/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
xyguo/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
50
|
4764
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
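    """Average held-out deviance over ``n_folds`` cross-validation folds, per boosting iteration."""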
cv = KFold(n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv.split(X_train, y_train):
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
hsiaoyi0504/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
178
|
8006
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
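            # weighted majority vote: for each sample, count the classifiers'
            # label-encoded predictions with np.bincount (optionally weighted)
            # and pick the argmax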
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
|
bsd-3-clause
|
bzero/statsmodels
|
statsmodels/sandbox/regression/tests/test_gmm_poisson.py
|
31
|
13338
|
'''
TestGMMMultTwostepDefault() has lower precision
'''
from statsmodels.compat.python import lmap
import numpy as np
from numpy.testing.decorators import skipif
import pandas
import scipy
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression import gmm
from numpy.testing import assert_allclose, assert_equal
from statsmodels.compat.scipy import NumpyVersion
def get_data():
import os
curdir = os.path.split(__file__)[0]
dt = pandas.read_csv(os.path.join(curdir, 'racd10data_with_transformed.csv'))
# Transformations compared to original data
##dt3['income'] /= 10.
##dt3['aget'] = (dt3['age'] - dt3['age'].min()) / 5.
##dt3['aget2'] = dt3['aget']**2
# How do we do this with pandas
mask = ~((np.asarray(dt['private']) == 1) & (dt['medicaid'] == 1))
mask = mask & (dt['docvis'] <= 70)
dt3 = dt[mask]
dt3['const'] = 1 # add constant
return dt3
DATA = get_data()
#------------- moment conditions for example
def moment_exponential_add(params, exog, exp=True):
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
#if not np.isfinite(predicted).all():
#print "invalid predicted", predicted
#raise RuntimeError('invalid predicted')
predicted = np.clip(predicted, 0, 1e100) # try to avoid inf
else:
predicted = np.dot(exog, params)
return predicted
def moment_exponential_mult(params, data, exp=True):
# multiplicative error model
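    # with exp=True the residual is endog / exp(exog @ params) - 1, which has
    # zero conditional mean when the multiplicative model is correctly specified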
endog = data[:,0]
exog = data[:,1:]
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
predicted = np.clip(predicted, 0, 1e100) # avoid inf
resid = endog / predicted - 1
if not np.isfinite(resid).all():
print("invalid resid", resid)
else:
resid = endog - np.dot(exog, params)
return resid
#------------------- test classes
# copied from test_gmm.py, with changes
class CheckGMM(object):
# default tolerance, overwritten by subclasses
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
q_tol = [5e-6, 1e-9]
j_tol = [5e-5, 1e-9]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
rtol, atol = self.bse_tol
assert_allclose(res1.bse, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=atol)
def test_other(self):
res1, res2 = self.res1, self.res2
rtol, atol = self.q_tol
assert_allclose(res1.q, res2.Q, rtol=atol, atol=rtol)
rtol, atol = self.j_tol
assert_allclose(res1.jval, res2.J, rtol=atol, atol=rtol)
j, jpval, jdf = res1.jtest()
# j and jval should be the same
assert_allclose(res1.jval, res2.J, rtol=13, atol=13)
#pvalue is not saved in Stata results
pval = stats.chi2.sf(res2.J, res2.J_df)
#assert_allclose(jpval, pval, rtol=1e-4, atol=1e-6)
assert_allclose(jpval, pval, rtol=rtol, atol=atol)
assert_equal(jdf, res2.J_df)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestGMMAddOnestep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
q_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False})
self.res1 = res0
from .results_gmm_poisson import results_addonestep as results
self.res2 = results
class TestGMMAddTwostep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_addtwostep as results
self.res2 = results
class TestGMMMultOnestep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
self.q_tol = [0.04, 0]
self.j_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multonestep as results
self.res2 = results
class TestGMMMultTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multtwostep as results
self.res2 = results
class TestGMMMultTwostepDefault(CheckGMM):
# compares my defaults with the same options in Stata
# agreement is not very high, maybe vce(unadjusted) is different after all
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [0.004, 5e-4]
self.params_tol = [5e-5, 5e-5]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
#wargs={'centered':True}, has_optimal_weights=True
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepdefault as results
self.res2 = results
class TestGMMMultTwostepCenter(CheckGMM):
#compares my defaults with the same options in Stata
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-4, 5e-5]
self.params_tol = [5e-5, 5e-5]
q_tol = [5e-5, 1e-8]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':True}, has_optimal_weights=False
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepcenter as results
self.res2 = results
def test_more(self):
# from Stata `overid`
J_df = 1
J_p = 0.332254330027383
J = 0.940091427212973
j, jpval, jdf = self.res1.jtest()
assert_allclose(jpval, J_p, rtol=5e-5, atol=0)
if __name__ == '__main__':
tt = TestGMMAddOnestep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMAddTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultOnestep()
tt.setup_class()
tt.test_basic()
#tt.test_other()
tt = TestGMMMultTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepDefault()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepCenter()
tt.setup_class()
tt.test_basic()
tt.test_other()
|
bsd-3-clause
|
0todd0000/rft1d
|
rft1d/examples/paper/fig12_val_set.py
|
1
|
2159
|
import numpy as np
from scipy import stats
from matplotlib import pyplot,cm
import rft1d
import myplot,myh5
eps = np.finfo(float).eps #smallest float
### EPS production preliminaries:
fig_width_mm = 100
fig_height_mm = 80
mm2in = 1/25.4
fig_width = fig_width_mm*mm2in # width in inches
fig_height = fig_height_mm*mm2in # height in inches
params = { 'backend':'ps', 'axes.labelsize':14,
'font.size':12, 'text.usetex': False, 'legend.fontsize':12,
'xtick.labelsize':8, 'ytick.labelsize':8,
'font.family':'Times New Roman', #Times
'lines.linewidth':0.5,
'patch.linewidth':0.25,
'figure.figsize': [fig_width,fig_height]}
pyplot.rcParams.update(params)
#(0) Set parameters:
np.random.seed(0)
nResponses = 500 #raise to 50000 to reproduce the results from the paper
nNodes = 101
FWHM = 8.5
interp = True
wrap = True
heights = [2.0, 2.2, 2.4]
c = 2
### generate data:
y = rft1d.randn1d(nResponses, nNodes, FWHM)
calc = rft1d.geom.ClusterMetricCalculator()
rftcalc = rft1d.prob.RFTCalculator(STAT='Z', nodes=nNodes, FWHM=FWHM)
#(1) Maximum region size:
K0 = np.linspace(eps, 8, 21)
K = [[calc.cluster_extents(yy, h, interp, wrap) for yy in y] for h in heights]
### compute number of upcrossings above a threshold:
C = np.array([[[ sum([kkk>=k0 for kkk in kk]) for kk in k] for k in K] for k0 in K0])
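# P[h, k0]: simulated probability of observing at least c suprathreshold clusters
# with extent >= k0 at height heights[h]; P0 is the corresponding theoretical
# (RFT) set-level probability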
P = np.mean(C>=c, axis=2).T
P0 = np.array([[rftcalc.p.set(c, k0, h) for h in heights] for k0 in K0/FWHM]).T
#(2) Plot results:
pyplot.close('all')
colors = ['b', 'g', 'r']
ax = pyplot.axes([0.17,0.14,0.80,0.84])
for color,p,p0,u in zip(colors,P,P0,heights):
ax.plot(K0, p, 'o', color=color, markersize=5)
ax.plot(K0, p0, '-', color=color, label='$u$ = %.1f'%u)
### legend:
ax.plot([0,1],[10,10], 'k-', label='Theoretical')
ax.plot([0,1],[10,10], 'ko-', label='Simulated', markersize=5)
ax.legend()
### axis labels:
ax.set_xlabel('$k_\mathrm{min}$', size=16)
ax.set_ylabel('$P(c \geq 2 | k_\mathrm{min})$', size=16)
ax.set_ylim(0, 0.08)
pyplot.show()
# pyplot.savefig('fig_valid_gauss1d_set.pdf')
|
gpl-3.0
|
sshh12/Students-Visualization
|
other_visuals/show_gpa_demo_plot_stats.py
|
1
|
1818
|
from cyranchdb import cyranch_db
from collections import defaultdict
import math
user_map = {} # user id -> school, grade, gender
gpas_map = {} # user id -> pos, gpa
for index, row in cyranch_db.tables.demo.all().iterrows():
if row["gradelevel"] > 12:
continue
user_map[row.user_id] = {
"gender": row["gender"],
"language": row["language"]
}
for index, row in cyranch_db.tables.rank.all().iterrows():
gpas_map[row["user_id"]] = (row["pos"], row["gpa"])
def create_plot():
"""Plots with matplot lib"""
import matplotlib.pyplot as plt
def create_scatter_plot(plot, feature, values, color_map):
feature_sets = {val: [] for val in values}
for user in user_map:
if user in gpas_map and user_map[user][feature] in values:
feature_sets[user_map[user][feature]].append(gpas_map[user])
for val in values:
x, y = [], []
for pos, gpa in feature_sets[val]:
x.append(int(pos))
y.append(float(gpa))
plot.scatter(x, y, c=color_map[val], marker='.')
plot.legend(values)
plot.set_ylabel("GPA")
plot.set_xlabel("Rank")
f, ((ax1, ax2)) = plt.subplots(2, 1, sharey=True)
plt.style.use('ggplot')
create_scatter_plot(ax1, "gender",
["male", "female"],
{"male": "b", "female": "r"})
create_scatter_plot(ax2, "language",
["english", "spanish", "vietnamese", "arabic", "cantonese"],
{"english": "r", "spanish": "g", "vietnamese": "b", "arabic": "y", "cantonese": "c"})
f.suptitle('GPA Gender/Language')
plt.show()
if __name__ == "__main__":
create_plot()
|
mit
|
chrisb13/mkmov
|
commands/_twodbm.py
|
2
|
3231
|
## Author: Christopher Bull.
## Affiliation: Climate Change Research Centre and ARC Centre of Excellence for Climate System Science.
## Level 4, Mathews Building
## University of New South Wales
## Sydney, NSW, Australia, 2052
## Contact: [email protected]
## www: christopherbull.com.au
## Date created: Thu Jun 5 10:11:55 EST 2014
## Machine created on: squall.ccrc.unsw.edu.au
##
## The virtualenv packages available on creation date (includes systemwide):
## Cartopy==0.11.x
## Cython==0.19.1
## Fiona==1.1.2
## GDAL==1.10.1
## Jinja==1.2
## Jinja2==2.7.2
## MDP==3.3
## MarkupSafe==0.18
## PyNGL==1.4.0
## Pygments==1.6
## ScientificPython==2.8
## Shapely==1.3.0
## Sphinx==1.2.1
## backports.ssl-match-hostname==3.4.0.2
## basemap==1.0.7
## brewer2mpl==1.4
## descartes==1.0.1
## distribute==0.7.3
## docutils==0.11
## geopandas==0.1.0.dev-1edddad
## h5py==2.2.0
## ipython==1.2.0
## joblib==0.7.1
## matplotlib==1.3.1
## netCDF4==1.0.4
## nose==1.3.3
## numexpr==2.2.2
## numpy==1.8.1
## pandas==0.13.1
## patsy==0.2.1
## pexpect==2.4
## prettyplotlib==0.1.7
## progressbar==2.3
## py==1.4.20
## pycairo==1.8.6
## pygrib==1.9.7
## pyhdf==0.8.3
## pyparsing==2.0.2
## pyproj==1.9.3
## pyshp==1.2.1
## pytest==2.5.2
## python-dateutil==2.2
## pytz==2014.1
## pyzmq==14.0.1
## scikit-learn==0.13.1
## scipy==0.12.0
## seaborn==0.3.1
## six==1.6.1
## statsmodels==0.5.0
## tables==3.0.0
## tornado==3.2.1
## virtualenv==1.10.1
## wsgiref==0.1.2
## xmltodict==0.8.6
##
## The modules available on creation date:
## # Currently Loaded Modulefiles:
# 1) hdf5/1.8.11-intel 5) matlab/2011b 9) perl/5.18.2
# 2) ncview/2.1.2 6) python/2.7.5 10) gdal/1.10.1
# 3) netcdf/3.6.3-intel 7) proj/4.8.0
# 4) intel/13.1.3.192 8) geos/3.3.3
#
#python logging
import logging as _logging
from functools import wraps as _wraps
class _LogStart(object):
"class that sets up a logger"
def setup(self,fname=''):
if fname=='':
# _logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
# level=_logging.DEBUG,disable_existing_loggers=True)
_logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
level=_logging.DEBUG)
else:
_logging.basicConfig(filename=fname,filemode='w',format='%(name)s - %(levelname)s - %(message)s',
                    level=_logging.DEBUG) #where filemode clobbers file
lg = _logging.getLogger(__name__)
return lg
if __name__ == "__main__": #are we being run directly?
lg=_LogStart().setup()
#lg=meh.go()
# print __name__
#LogStart(args.inputdir+'asciplot_lc_katana'+args.fno + '.log',fout=True)
lg.info('moo')
#PUT wothwhile code here!
|
gpl-3.0
|
ethz-nus/lis-2015
|
project3/train.py
|
1
|
2053
|
import h5py
from methods import *
import numpy as np
import cudamat
from scipy.stats.mstats import mode
from sklearn.covariance import EllipticEnvelope
doTest = True
def pred_score(truth, pred):
score = np.sum(map(lambda x: x[1] != pred[x[0]], enumerate(truth)))
return 1.0/len(truth) * score
def run_prediction(tfile, yclassifier):
testX = np.array(tfile['data'])
yRes = yclassifier.predict(testX)
#yProbs = yclassifier.predict_proba(testX)
#print yProbs
return yRes
def save_prediction(outname, pred, score):
# out = h5py.File(outname + '-%f.h5' % score , 'w')
shape = pred.shape
pred = pred.reshape((shape[1], shape[0]))
# out.flush()
np.savetxt(outname + '-%f.txt' % score, pred, fmt="%d", delimiter=',')
def run_and_save_prediction(tfile, outname, yclassifier, combinedScore):
yRes = run_prediction(tfile, yclassifier)
save_prediction(outname, yRes, combinedScore)
def save_mode_predictions(yResults, score, filename):
	yCombined = mode(np.array(yResults))[0]
	save_prediction(filename, yCombined, score)
train = h5py.File("project_data/train.h5", "r")
validate = h5py.File("project_data/validate.h5", "r")
test = h5py.File("project_data/test.h5", "r")
X = np.array(train['data'])
Y = np.array(train['label'])
runs = 20
scores = []
yResults = []
incScores = 0
yTestResults = []
for i in range(runs):
ytrainer = deep_belief_network
print 'running ' + ytrainer.__name__
print 'training'
Y = np.ravel(Y)
yclassifier, ypred, ytruth = ytrainer(X, Y)
score = pred_score(ytruth, ypred)
scores.append(score)
print score
threshold = 0.28
if score < threshold:
print 'predicting'
yRes = run_prediction(validate, yclassifier)
yResults.append(yRes)
incScores += score
if doTest:
yTestRes = run_prediction(test, yclassifier)
yTestResults.append(yTestRes)
if yResults:
combinedScore = incScores / len(yResults)
save_mode_predictions(yResults, combinedScore, "validate")
if doTest:
save_mode_predictions(yTestResults, combinedScore, "test")
print np.mean(scores)
print np.std(scores)
|
mit
|
rajat1994/scikit-learn
|
sklearn/utils/graph.py
|
289
|
6239
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
        # The sparsity pattern of the matrix has holes on the diagonal,
        # so we add explicit entries there before zeroing the diagonal
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
    w = -np.asarray(lap.sum(axis=1)).squeeze()  # node degrees
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
    w = -lap.sum(axis=0)  # node degrees
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
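# ----------------------------------------------------------------------------
# Illustrative usage sketch -- added for clarity, not part of the original
# module.  Because this file relies on relative imports it is not meant to be
# run as a plain script; the guarded block below simply documents what
# ``graph_laplacian`` returns for a tiny 3-node path graph, where L = D - A.
if __name__ == "__main__":
    A = np.array([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]])
    # Unnormalized Laplacian: [[ 1, -1,  0], [-1,  2, -1], [ 0, -1,  1]]
    print(graph_laplacian(A))
    # Symmetric normalized Laplacian: ones on the diagonal and
    # -1 / sqrt(d_i * d_j) for connected pairs (i, j).
    print(graph_laplacian(A, normed=True))
    # BFS shortest path lengths from node 0: {0: 0, 1: 1, 2: 2}
    print(single_source_shortest_path_length(A, 0))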
|
bsd-3-clause
|
sinhrks/scikit-learn
|
examples/manifold/plot_swissroll.py
|
330
|
1446
|
"""
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
|
bsd-3-clause
|
appapantula/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
                # classic perceptron update: when a sample is misclassified,
                # move the decision boundary towards it
                if self.predict(X[i])[0] != y[i]:
                    self.w += y[i] * X[i]
                    self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
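# ----------------------------------------------------------------------------
# Illustrative sketch -- not part of the original test module.  Running this
# file directly fits the hand-rolled reference implementation above on the
# binary (class 1 vs. rest) problem and prints its training accuracy, which
# makes it easier to see how the reference model is used by the tests.
if __name__ == "__main__":
    y_bin = y.copy()
    y_bin[y != 1] = -1
    ref = MyPerceptron(n_iter=2)
    ref.fit(X, y_bin)
    acc = np.mean(ref.predict(X) == y_bin)
    print("MyPerceptron training accuracy: %.3f" % acc)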
|
bsd-3-clause
|
wzbozon/scikit-learn
|
examples/ensemble/plot_gradient_boosting_quantile.py
|
392
|
2114
|
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluation of the real function and of the
# predictions
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval obtained
# from the two quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
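# ----------------------------------------------------------------------------
# Illustrative extra check -- not part of the original example.  It refits the
# two quantile models and reports the empirical coverage of the 90% interval
# on the training points.  Training-set coverage is optimistic (the boosted
# models can overfit), so treat the printed number as indicative only.
clf_upper = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                      n_estimators=250, max_depth=3,
                                      learning_rate=.1, min_samples_leaf=9,
                                      min_samples_split=9).fit(X, y)
clf_lower = GradientBoostingRegressor(loss='quantile', alpha=1.0 - alpha,
                                      n_estimators=250, max_depth=3,
                                      learning_rate=.1, min_samples_leaf=9,
                                      min_samples_split=9).fit(X, y)
upper = clf_upper.predict(X)
lower = clf_lower.predict(X)
coverage = np.mean((y >= lower) & (y <= upper))
print("Empirical coverage of the 90%% interval on training data: %.2f"
      % coverage)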
|
bsd-3-clause
|
dsm054/pandas
|
pandas/tests/reshape/merge/test_merge.py
|
1
|
79282
|
# pylint: disable=E1103
import random
import re
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pytest
from numpy import nan
from numpy.random import randn
import pandas as pd
import pandas.util.testing as tm
from pandas import (Categorical, CategoricalIndex, DataFrame, DatetimeIndex,
Float64Index, Index, Int64Index, MultiIndex, RangeIndex,
Series, UInt64Index)
from pandas.api.types import CategoricalDtype as CDT
from pandas.compat import lrange, lzip
from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import MergeError, merge
from pandas.util.testing import assert_frame_equal, assert_series_equal
N = 50
NGROUPS = 8
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = lrange(ngroups)
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
random.shuffle(arr)
return arr
class TestMerge(object):
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
self.right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
def test_merge_inner_join_empty(self):
# GH 15328
df_empty = pd.DataFrame()
df_a = pd.DataFrame({'a': [1, 2]}, index=[0, 1], dtype='int64')
result = pd.merge(df_empty, df_a, left_index=True, right_index=True)
expected = pd.DataFrame({'a': []}, index=[], dtype='int64')
assert_frame_equal(result, expected)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
def test_merge_index_as_on_arg(self):
# GH14355
left = self.df.set_index('key1')
right = self.df2.set_index('key1')
result = merge(left, right, on='key1')
expected = merge(self.df, self.df2, on='key1').set_index('key1')
assert_frame_equal(result, expected)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').loc[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').loc[result.index]
assert_frame_equal(result, expected.loc[:, result.columns])
def test_merge_misspecified(self):
pytest.raises(ValueError, merge, self.left, self.right,
left_index=True)
pytest.raises(ValueError, merge, self.left, self.right,
right_index=True)
pytest.raises(ValueError, merge, self.left, self.left,
left_on='key', on='key')
pytest.raises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_index_and_on_parameters_confusion(self):
pytest.raises(ValueError, merge, self.df, self.df2, how='left',
left_index=False, right_index=['key1', 'key2'])
pytest.raises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'], right_index=False)
pytest.raises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'],
right_index=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
assert len(merged) == exp_len
assert 'v1_x' in merged
assert 'v1_y' in merged
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],
name='lkey')
tm.assert_series_equal(merged['lkey'], exp)
exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],
name='rkey')
tm.assert_series_equal(merged['rkey'], exp)
exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')
tm.assert_series_equal(merged['value_x'], exp)
exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')
tm.assert_series_equal(merged['value_y'], exp)
def test_merge_copy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=True)
merged['a'] = 6
assert (left['a'] == 0).all()
merged['d'] = 'peekaboo'
assert (right['d'] == 'bar').all()
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=False)
merged['a'] = 6
assert (left['a'] == 6).all()
merged['d'] = 'peekaboo'
assert (right['d'] == 'peekaboo').all()
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': [0, 1, 0, 1, 2, 2, 3, 4, 5]},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected)
def test_merge_join_key_dtype_cast(self):
# #8596
df1 = DataFrame({'key': [1], 'v1': [10]})
df2 = DataFrame({'key': [2], 'v1': [20]})
df = merge(df1, df2, how='outer')
assert df['key'].dtype == 'int64'
df1 = DataFrame({'key': [True], 'v1': [1]})
df2 = DataFrame({'key': [False], 'v1': [0]})
df = merge(df1, df2, how='outer')
# GH13169
# this really should be bool
assert df['key'].dtype == 'object'
df1 = DataFrame({'val': [1]})
df2 = DataFrame({'val': [2]})
lkey = np.array([1])
rkey = np.array([2])
df = merge(df1, df2, left_on=lkey, right_on=rkey, how='outer')
assert df['key_0'].dtype == 'int64'
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
assert merged['key'].notna().all()
assert merged2['key'].notna().all()
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
tm.assert_series_equal(merged['key_0'], Series([1, 1, 1, 1, 2,
2, 3, 4, 5],
name='key_0'))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
merged = merge(left, right, left_index=True, right_on=key, how='outer')
tm.assert_series_equal(merged['key_0'], Series(key, name='key_0'))
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
pytest.raises(MergeError, merge, df1, df2)
msg = ('No common columns to perform merge on. '
'Merge options: left_on={lon}, right_on={ron}, '
'left_index={lidx}, right_index={ridx}'
.format(lon=None, ron=None, lidx=False, ridx=False))
with pytest.raises(MergeError, match=msg):
merge(df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({'key': [1], 'value': [2]})
right = DataFrame({'key': []})
result = merge(left, right, on='key', how='left')
assert_frame_equal(result, left)
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
@pytest.mark.parametrize('kwarg',
[dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')])
def test_merge_left_empty_right_empty(self, join_type, kwarg):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
index=pd.Index([], dtype=object),
dtype=object)
result = pd.merge(left, right, how=join_type, **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),
'b': np.array([np.nan] * 3, dtype=object),
'c': np.array([np.nan] * 3, dtype=object),
'x': [1, 4, 7],
'y': [2, 5, 8],
'z': [3, 6, 9]},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_index=True)
check1(exp_in, kwarg)
exp_out['a'] = [0, 1, 2]
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_on='x')
check1(exp_in, kwarg)
exp_out['a'] = np.array([np.nan] * 3, dtype=object)
check2(exp_out, kwarg)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': [1, 4, 7],
'b': [2, 5, 8],
'c': [3, 6, 9],
'x': np.array([np.nan] * 3, dtype=object),
'y': np.array([np.nan] * 3, dtype=object),
'z': np.array([np.nan] * 3, dtype=object)},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
def test_merge_nosort(self):
# #2098, anything to do?
from datetime import datetime
d = {"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2010, 2, 3),
datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2012, 4, 3),
datetime(2012, 3, 4),
datetime(2008, 5, 1),
datetime(2010, 2, 3),
datetime(2012, 2, 3)]}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3,
"var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
assert (df.var3.unique() == result.var3.unique()).all()
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
df2 = DataFrame({"i1": [0], "i3": [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan},
'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}})
.set_index(None)
.reset_index()[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
df2 = DataFrame({"i1": [0], "i3": [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5},
'i3': {0: 0.69999999999999996,
1: nan}})
[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
assert isinstance(result, NotADataFrame)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56)],
't': [dt.timedelta(0, 22500),
dt.timedelta(0, 22500)]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td, td], index=["A", "B"]))
rhs = DataFrame(Series([td], index=["A"]))
result = lhs.join(rhs, rsuffix='r', how="left")
expected = DataFrame({'0': Series([td, td], index=list('AB')),
'0r': Series([td, NaT], index=list('AB'))})
assert_frame_equal(result, expected)
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
df2 = s.astype(dtype).to_frame('days')
            # coerces to datetime64[ns], thus should not be affected
assert df2['days'].dtype == 'datetime64[ns]'
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype='datetime64[ns]')},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_other_timedelta_unit(self, unit):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
dtype = "m8[{}]".format(unit)
df2 = s.astype(dtype).to_frame('days')
assert df2['days'].dtype == 'm8[ns]'
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype=dtype)},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
def test_overlapping_columns_error_message(self):
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
expected = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9],
'v3': [4, 5, 6],
'v4': [7, 8, 9]})
expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ['key1', 'foo', 'foo']
pytest.raises(ValueError, merge, df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,
tz='US/Eastern'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,
tz='US/Eastern'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.date_range('20151010', periods=4,
tz='US/Eastern'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'key': [1, 2],
'value': pd.date_range('20151010', periods=2,
tz='US/Eastern')})
right = pd.DataFrame({'key': [2, 3],
'value': pd.date_range('20151011', periods=2,
tz='US/Eastern')})
expected = DataFrame({
'key': [1, 2, 3],
'value_x': list(pd.date_range('20151010', periods=2,
tz='US/Eastern')) + [pd.NaT],
'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,
tz='US/Eastern'))})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]'
assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]'
def test_merge_datetime64tz_with_dst_transition(self):
# GH 18885
df1 = pd.DataFrame(pd.date_range(
'2017-10-29 01:00', periods=4, freq='H', tz='Europe/Madrid'),
columns=['date'])
df1['value'] = 1
df2 = pd.DataFrame({
'date': pd.to_datetime([
'2017-10-29 03:00:00', '2017-10-29 04:00:00',
'2017-10-29 05:00:00'
]),
'value': 2
})
df2['date'] = df2['date'].dt.tz_localize('UTC').dt.tz_convert(
'Europe/Madrid')
result = pd.merge(df1, df2, how='outer', on='date')
expected = pd.DataFrame({
'date': pd.date_range(
'2017-10-29 01:00', periods=7, freq='H', tz='Europe/Madrid'),
'value_x': [1] * 4 + [np.nan] * 3,
'value_y': [np.nan] * 4 + [2] * 3
})
assert_frame_equal(result, expected)
def test_merge_non_unique_period_index(self):
# GH #16871
index = pd.period_range('2016-01-01', periods=16, freq='M')
df = DataFrame([i for i in range(len(index))],
index=index, columns=['pnum'])
df2 = concat([df, df])
result = df.merge(df2, left_index=True, right_index=True, how='inner')
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=['pnum_x', 'pnum_y'], index=df2.sort_index().index)
tm.assert_frame_equal(result, expected)
def test_merge_on_periods(self):
left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,
freq='D'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,
freq='D'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.period_range('20151010', periods=4,
freq='D'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'key': [1, 2],
'value': pd.period_range('20151010', periods=2,
freq='D')})
right = pd.DataFrame({'key': [2, 3],
'value': pd.period_range('20151011', periods=2,
freq='D')})
exp_x = pd.period_range('20151010', periods=2, freq='D')
exp_y = pd.period_range('20151011', periods=2, freq='D')
expected = DataFrame({'key': [1, 2, 3],
'value_x': list(exp_x) + [pd.NaT],
'value_y': [pd.NaT] + list(exp_y)})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
assert result['value_x'].dtype == 'Period[D]'
assert result['value_y'].dtype == 'Period[D]'
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame({'col1': [0, 1], 'col_conflict': [1, 2],
'col_left': ['a', 'b']})
df1_copy = df1.copy()
df2 = DataFrame({'col1': [1, 2, 3, 4, 5],
'col_conflict': [1, 2, 3, 4, 5],
'col_right': [2, 2, 2, 2, 2]})
df2_copy = df2.copy()
df_result = DataFrame({
'col1': [0, 1, 2, 3, 4, 5],
'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],
'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],
'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],
'col_right': [np.nan, 2, 2, 2, 2, 2]})
df_result['_merge'] = Categorical(
['left_only', 'both', 'right_only',
'right_only', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
df_result = df_result[['col1', 'col_conflict_x', 'col_left',
'col_conflict_y', 'col_right', '_merge']]
test = merge(df1, df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
test = df1.merge(df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
# No side effects
assert_frame_equal(df1, df1_copy)
assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(
columns={'_merge': 'custom_name'})
test_custom_name = merge(
df1, df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(
df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
with pytest.raises(ValueError):
merge(df1, df2, on='col1', how='outer', indicator=5)
with pytest.raises(ValueError):
df1.merge(df2, on='col1', how='outer', indicator=5)
# Check result integrity
test2 = merge(df1, df2, on='col1', how='left', indicator=True)
assert (test2._merge != 'right_only').all()
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
assert (test2._merge != 'right_only').all()
test3 = merge(df1, df2, on='col1', how='right', indicator=True)
assert (test3._merge != 'left_only').all()
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
assert (test3._merge != 'left_only').all()
test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
assert (test4._merge == 'both').all()
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
assert (test4._merge == 'both').all()
        # Check that reserved indicator names already present in the frame raise
for i in ['_right_indicator', '_left_indicator', '_merge']:
df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})
with pytest.raises(ValueError):
merge(df1, df_badcolumn, on='col1',
how='outer', indicator=True)
with pytest.raises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame(
{'col1': [1, 2], 'custom_column_name': [2, 2]})
with pytest.raises(ValueError):
merge(df1, df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
with pytest.raises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
# Merge on multiple columns
df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})
df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})
hand_coded_result = DataFrame({'col1': [0, 1, 1, 3],
'col2': ['a', 'b', 'x', 'y']})
hand_coded_result['_merge'] = Categorical(
['left_only', 'both', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
test5 = merge(df3, df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
def test_validation(self):
left = DataFrame({'a': ['a', 'b', 'c', 'd'],
'b': ['cat', 'dog', 'weasel', 'horse']},
index=range(4))
right = DataFrame({'a': ['a', 'b', 'c', 'd', 'e'],
'c': ['meow', 'bark', 'um... weasel noise?',
'nay', 'chirp']},
index=range(5))
# Make sure no side effects.
left_copy = left.copy()
right_copy = right.copy()
result = merge(left, right, left_index=True, right_index=True,
validate='1:1')
assert_frame_equal(left, left_copy)
assert_frame_equal(right, right_copy)
        # make sure the merge result is still correct
expected = DataFrame({'a_x': ['a', 'b', 'c', 'd'],
'b': ['cat', 'dog', 'weasel', 'horse'],
'a_y': ['a', 'b', 'c', 'd'],
'c': ['meow', 'bark', 'um... weasel noise?',
'nay']},
index=range(4),
columns=['a_x', 'b', 'a_y', 'c'])
result = merge(left, right, left_index=True, right_index=True,
validate='one_to_one')
assert_frame_equal(result, expected)
expected_2 = DataFrame({'a': ['a', 'b', 'c', 'd'],
'b': ['cat', 'dog', 'weasel', 'horse'],
'c': ['meow', 'bark', 'um... weasel noise?',
'nay']},
index=range(4))
result = merge(left, right, on='a', validate='1:1')
assert_frame_equal(left, left_copy)
assert_frame_equal(right, right_copy)
assert_frame_equal(result, expected_2)
result = merge(left, right, on='a', validate='one_to_one')
assert_frame_equal(result, expected_2)
# One index, one column
expected_3 = DataFrame({'b': ['cat', 'dog', 'weasel', 'horse'],
'a': ['a', 'b', 'c', 'd'],
'c': ['meow', 'bark', 'um... weasel noise?',
'nay']},
columns=['b', 'a', 'c'],
index=range(4))
left_index_reset = left.set_index('a')
result = merge(left_index_reset, right, left_index=True,
right_on='a', validate='one_to_one')
assert_frame_equal(result, expected_3)
# Dups on right
right_w_dups = right.append(pd.DataFrame({'a': ['e'], 'c': ['moo']},
index=[4]))
merge(left, right_w_dups, left_index=True, right_index=True,
validate='one_to_many')
with pytest.raises(MergeError):
merge(left, right_w_dups, left_index=True, right_index=True,
validate='one_to_one')
with pytest.raises(MergeError):
merge(left, right_w_dups, on='a', validate='one_to_one')
# Dups on left
left_w_dups = left.append(pd.DataFrame({'a': ['a'], 'c': ['cow']},
index=[3]), sort=True)
merge(left_w_dups, right, left_index=True, right_index=True,
validate='many_to_one')
with pytest.raises(MergeError):
merge(left_w_dups, right, left_index=True, right_index=True,
validate='one_to_one')
with pytest.raises(MergeError):
merge(left_w_dups, right, on='a', validate='one_to_one')
# Dups on both
merge(left_w_dups, right_w_dups, on='a', validate='many_to_many')
with pytest.raises(MergeError):
merge(left_w_dups, right_w_dups, left_index=True,
right_index=True, validate='many_to_one')
with pytest.raises(MergeError):
merge(left_w_dups, right_w_dups, on='a',
validate='one_to_many')
# Check invalid arguments
with pytest.raises(ValueError):
merge(left, right, on='a', validate='jibberish')
# Two column merge, dups in both, but jointly no dups.
left = DataFrame({'a': ['a', 'a', 'b', 'b'],
'b': [0, 1, 0, 1],
'c': ['cat', 'dog', 'weasel', 'horse']},
index=range(4))
right = DataFrame({'a': ['a', 'a', 'b'],
'b': [0, 1, 0],
'd': ['meow', 'bark', 'um... weasel noise?']},
index=range(3))
expected_multi = DataFrame({'a': ['a', 'a', 'b'],
'b': [0, 1, 0],
'c': ['cat', 'dog', 'weasel'],
'd': ['meow', 'bark',
'um... weasel noise?']},
index=range(3))
with pytest.raises(MergeError):
merge(left, right, on='a', validate='1:1')
result = merge(left, right, on=['a', 'b'], validate='1:1')
assert_frame_equal(result, expected_multi)
def test_merge_two_empty_df_no_division_error(self):
# GH17776, PR #17846
a = pd.DataFrame({'a': [], 'b': [], 'c': []})
with np.errstate(divide='raise'):
merge(a, a, on=('a', 'b'))
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
# TODO check_names on merge?
assert_frame_equal(result, expected, check_names=False)
class TestMergeMulti(object):
def setup_method(self, method):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
columns=['j_one', 'j_two', 'j_three'])
# a little relevant example with NAs
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
self.data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.loc[:, joined.columns])
left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
right = expected.loc[:, joined.columns].sort_values(['key1', 'key2'],
kind='mergesort')
assert_frame_equal(left, right)
def test_left_join_multi_index(self):
icols = ['1st', '2nd', '3rd']
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord('a')
return (f(df['1st']) + f(df['3rd']) * 1e2 +
df['2nd'].fillna(0) * 1e4)
def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
assert len(left) < len(res) + 1
assert not res['4th'].isna().any()
assert not res['5th'].isna().any()
tm.assert_series_equal(
res['4th'], - res['5th'], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res['4th'], result, check_names=False)
assert result.name is None
if sort:
tm.assert_frame_equal(
res, res.sort_values(icols, kind='mergesort'))
out = merge(left, right.reset_index(), on=icols,
sort=sort, how='left')
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)),
columns=['1st', '3rd'])
left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left['4th'] = bind_cols(left)
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
# inject some nulls
left.loc[1::23, '1st'] = np.nan
left.loc[2::37, '2nd'] = np.nan
left.loc[3::43, '3rd'] = np.nan
left['4th'] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
def test_merge_right_vs_left(self):
# compare left vs right merge with multikey
for sort in [False, True]:
merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
right_index=True, how='left', sort=sort)
merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
left_index=True, how='right',
sort=sort)
merged2 = merged2.loc[:, merged1.columns]
assert_frame_equal(merged1, merged2)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
        # test join with blocks of multiple dtypes
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3': np.array([0, 1, 2] * 8, dtype=np.float32),
'v': np.array(np.arange(24), dtype=np.int32)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.loc[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame([
['X', 'Y', 'C', 'a'],
['W', 'Y', 'C', 'e'],
['V', 'Q', 'A', 'h'],
['V', 'R', 'D', 'i'],
['X', 'Y', 'D', 'b'],
['X', 'Y', 'A', 'c'],
['W', 'Q', 'B', 'f'],
['W', 'R', 'C', 'g'],
['V', 'Y', 'C', 'j'],
['X', 'Y', 'B', 'd']],
columns=['cola', 'colb', 'colc', 'tag'],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])
right = DataFrame([
['W', 'R', 'C', 0],
['W', 'Q', 'B', 3],
['W', 'Q', 'B', 8],
['X', 'Y', 'A', 1],
['X', 'Y', 'A', 4],
['X', 'Y', 'B', 5],
['X', 'Y', 'C', 6],
['X', 'Y', 'C', 9],
['X', 'Q', 'C', -6],
['X', 'R', 'C', -9],
['V', 'Y', 'C', 7],
['V', 'R', 'D', 2],
['V', 'R', 'D', -1],
['V', 'Q', 'A', -3]],
columns=['col1', 'col2', 'col3', 'val'])
right.set_index(['col1', 'col2', 'col3'], inplace=True)
result = left.join(right, on=['cola', 'colb', 'colc'], how='left')
expected = DataFrame([
['X', 'Y', 'C', 'a', 6],
['X', 'Y', 'C', 'a', 9],
['W', 'Y', 'C', 'e', nan],
['V', 'Q', 'A', 'h', -3],
['V', 'R', 'D', 'i', 2],
['V', 'R', 'D', 'i', -1],
['X', 'Y', 'D', 'b', nan],
['X', 'Y', 'A', 'c', 1],
['X', 'Y', 'A', 'c', 4],
['W', 'Q', 'B', 'f', 3],
['W', 'Q', 'B', 'f', 8],
['W', 'R', 'C', 'g', 0],
['V', 'Y', 'C', 'j', 7],
['X', 'Y', 'B', 'd', 5]],
columns=['cola', 'colb', 'colc', 'tag', 'val'],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['cola', 'colb', 'colc'],
how='left', sort=True)
tm.assert_frame_equal(
result,
expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))
# GH7331 - maintain left frame order in left merge
right.reset_index(inplace=True)
right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
result = merge(left, right, how='left', on=left.columns[:-1].tolist())
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame([
['c', 0],
['b', 1],
['a', 2],
['b', 3]],
columns=['tag', 'val'],
index=[2, 0, 1, 3])
right = DataFrame([
['a', 'v'],
['c', 'w'],
['c', 'x'],
['d', 'y'],
['a', 'z'],
['c', 'r'],
['e', 'q'],
['c', 's']],
columns=['tag', 'char'])
right.set_index('tag', inplace=True)
result = left.join(right, on='tag', how='left')
expected = DataFrame([
['c', 0, 'w'],
['c', 0, 'x'],
['c', 0, 'r'],
['c', 0, 's'],
['b', 1, nan],
['a', 2, 'v'],
['a', 2, 'z'],
['b', 3, nan]],
columns=['tag', 'val', 'char'],
index=[2, 2, 2, 2, 0, 1, 1, 3])
tm.assert_frame_equal(result, expected)
result = left.join(right, on='tag', how='left', sort=True)
tm.assert_frame_equal(
result, expected.sort_values('tag', kind='mergesort'))
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how='left', on='tag')
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
data = [[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.],
[1950, "C", 4.],
[1960, "C", np.nan],
[1965, "C", 3.],
[1970, "C", 4.]]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [[1960, 'A', np.nan],
[1970, 'A', np.nan],
[1955, 'A', np.nan],
[1965, 'A', np.nan],
[1965, 'B', np.nan],
[1955, 'C', np.nan]]
other = DataFrame(other_data, columns=['year', 'panel', 'data'])
result = frame.merge(other, how='outer')
expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = (
DataFrame(
dict(household_id=[1, 2, 3],
male=[0, 1, 0],
wealth=[196087.3, 316478.7, 294750]),
columns=['household_id', 'male', 'wealth'])
.set_index('household_id'))
portfolio = (
DataFrame(
dict(household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "nl0000289965",
np.nan],
name=["ABN Amro", "Robeco", "Royal Dutch Shell",
"Royal Dutch Shell",
"AAB Eastern Europe Equity Fund",
"Postbank BioTech Fonds", np.nan],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
columns=['household_id', 'asset_id', 'name', 'share'])
.set_index(['household_id', 'asset_id']))
result = household.join(portfolio, how='inner')
expected = (
DataFrame(
dict(male=[0, 1, 1, 0, 0, 0],
wealth=[196087.3, 316478.7, 316478.7,
294750.0, 294750.0, 294750.0],
name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',
'Royal Dutch Shell',
'AAB Eastern Europe Equity Fund',
'Postbank BioTech Fonds'],
share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],
household_id=[1, 2, 2, 3, 3, 3],
asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',
'gb00b03mlx29', 'lu0197800237',
'nl0000289965']))
.set_index(['household_id', 'asset_id'])
.reindex(columns=['male', 'wealth', 'name', 'share']))
        assert_frame_equal(result, expected)
# equivalency
result2 = (merge(household.reset_index(), portfolio.reset_index(),
on=['household_id'], how='inner')
.set_index(['household_id', 'asset_id']))
assert_frame_equal(result2, expected)
result = household.join(portfolio, how='outer')
expected = (concat([
expected,
(DataFrame(
dict(share=[1.00]),
index=MultiIndex.from_tuples(
[(4, np.nan)],
names=['household_id', 'asset_id'])))
], axis=0, sort=True).reindex(columns=expected.columns))
assert_frame_equal(result, expected)
# invalid cases
household.index.name = 'foo'
def f():
household.join(portfolio, how='inner')
pytest.raises(ValueError, f)
portfolio2 = portfolio.copy()
portfolio2.index.set_names(['household_id', 'foo'])
def f():
portfolio2.join(portfolio, how='inner')
pytest.raises(ValueError, f)
def test_join_multi_levels2(self):
# some more advanced merges
# GH6360
household = (
DataFrame(
dict(household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "nl0000289965",
np.nan],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
columns=['household_id', 'asset_id', 'share'])
.set_index(['household_id', 'asset_id']))
log_return = DataFrame(dict(
asset_id=["gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "lu0197800237"],
t=[233, 234, 235, 180, 181],
log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["asset_id", "t"])
expected = (
DataFrame(dict(
household_id=[2, 2, 2, 3, 3, 3, 3, 3],
asset_id=["gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"lu0197800237", "lu0197800237"],
t=[233, 234, 235, 233, 234, 235, 180, 181],
share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
log_return=[.09604978, -.06524096, .03532373,
.09604978, -.06524096, .03532373,
.03025441, .036997]
))
.set_index(["household_id", "asset_id", "t"])
.reindex(columns=['share', 'log_return']))
def f():
household.join(log_return, how='inner')
pytest.raises(NotImplementedError, f)
# this is the equivalency
result = (merge(household.reset_index(), log_return.reset_index(),
on=['asset_id'], how='inner')
.set_index(['household_id', 'asset_id', 't']))
assert_frame_equal(result, expected)
expected = (
DataFrame(dict(
household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29",
"lu0197800237", "lu0197800237",
"nl0000289965", None],
t=[None, None, 233, 234, 235, 233, 234,
235, 180, 181, None, None],
share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,
0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
log_return=[None, None, .09604978, -.06524096, .03532373,
.09604978, -.06524096, .03532373,
.03025441, .036997, None, None]
))
.set_index(["household_id", "asset_id", "t"]))
def f():
household.join(log_return, how='outer')
pytest.raises(NotImplementedError, f)
@pytest.mark.parametrize("klass", [None, np.asarray, Series, Index])
def test_merge_datetime_index(self, klass):
# see gh-19038
df = DataFrame([1, 2, 3],
["2016-01-01", "2017-01-01", "2018-01-01"],
columns=["a"])
df.index = pd.to_datetime(df.index)
on_vector = df.index.year
if klass is not None:
on_vector = klass(on_vector)
expected = DataFrame(
OrderedDict([
("a", [1, 2, 3]),
("key_1", [2016, 2017, 2018]),
])
)
result = df.merge(df, on=["a", on_vector], how="inner")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
OrderedDict([
("key_0", [2016, 2017, 2018]),
("a_x", [1, 2, 3]),
("a_y", [1, 2, 3]),
])
)
result = df.merge(df, on=[df.index.year], how="inner")
tm.assert_frame_equal(result, expected)
class TestMergeDtypes(object):
@pytest.mark.parametrize('right_vals', [
['foo', 'bar'],
Series(['foo', 'bar']).astype('category'),
[1, 2],
[1.0, 2.0],
Series([1, 2], dtype='uint64'),
Series([1, 2], dtype='int32')
])
def test_different(self, right_vals):
left = DataFrame({'A': ['foo', 'bar'],
'B': Series(['foo', 'bar']).astype('category'),
'C': [1, 2],
'D': [1.0, 2.0],
'E': Series([1, 2], dtype='uint64'),
'F': Series([1, 2], dtype='int32')})
right = DataFrame({'A': right_vals})
# GH 9780
# We allow merging on object and categorical cols and cast
# categorical cols to object
if (is_categorical_dtype(right['A'].dtype) or
is_object_dtype(right['A'].dtype)):
result = pd.merge(left, right, on='A')
assert is_object_dtype(result.A.dtype)
# GH 9780
# We raise for merging on object col and int/float col and
# merging on categorical col and int/float col
else:
msg = ("You are trying to merge on "
"{lk_dtype} and {rk_dtype} columns. "
"If you wish to proceed you should use "
"pd.concat".format(lk_dtype=left['A'].dtype,
rk_dtype=right['A'].dtype))
with pytest.raises(ValueError, match=msg):
pd.merge(left, right, on='A')
@pytest.mark.parametrize('d1', [np.int64, np.int32,
np.int16, np.int8, np.uint8])
@pytest.mark.parametrize('d2', [np.int64, np.float64,
np.float32, np.float16])
def test_join_multi_dtypes(self, d1, d2):
dtype1 = np.dtype(d1)
dtype2 = np.dtype(d2)
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan, dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['k1', 'k2'], sort=True)
expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('int_vals, float_vals, exp_vals', [
([1, 2, 3], [1.0, 2.0, 3.0], {'X': [1, 2, 3], 'Y': [1.0, 2.0, 3.0]}),
([1, 2, 3], [1.0, 3.0], {'X': [1, 3], 'Y': [1.0, 3.0]}),
([1, 2], [1.0, 2.0, 3.0], {'X': [1, 2], 'Y': [1.0, 2.0]}),
])
def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):
# GH 16572
# Check that float column is not cast to object if
# merging on float and int columns
A = DataFrame({'X': int_vals})
B = DataFrame({'Y': float_vals})
expected = DataFrame(exp_vals)
result = A.merge(B, left_on='X', right_on='Y')
assert_frame_equal(result, expected)
result = B.merge(A, left_on='Y', right_on='X')
assert_frame_equal(result, expected[['Y', 'X']])
def test_merge_on_ints_floats_warning(self):
# GH 16572
# merge will produce a warning when merging on int and
# float columns where the float values are not exactly
# equal to their int representation
A = DataFrame({'X': [1, 2, 3]})
B = DataFrame({'Y': [1.1, 2.5, 3.0]})
expected = DataFrame({'X': [3], 'Y': [3.0]})
with tm.assert_produces_warning(UserWarning):
result = A.merge(B, left_on='X', right_on='Y')
assert_frame_equal(result, expected)
with tm.assert_produces_warning(UserWarning):
result = B.merge(A, left_on='Y', right_on='X')
assert_frame_equal(result, expected[['Y', 'X']])
# test no warning if float has NaNs
B = DataFrame({'Y': [np.nan, np.nan, 3.0]})
with tm.assert_produces_warning(None):
result = B.merge(A, left_on='Y', right_on='X')
assert_frame_equal(result, expected[['Y', 'X']])
def test_merge_incompat_infer_boolean_object(self):
# GH21119: bool + object bool merge OK
df1 = DataFrame({'key': Series([True, False], dtype=object)})
df2 = DataFrame({'key': [True, False]})
expected = DataFrame({'key': [True, False]}, dtype=object)
result = pd.merge(df1, df2, on='key')
assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on='key')
assert_frame_equal(result, expected)
# with missing value
df1 = DataFrame({'key': Series([True, False, np.nan], dtype=object)})
df2 = DataFrame({'key': [True, False]})
expected = DataFrame({'key': [True, False]}, dtype=object)
result = pd.merge(df1, df2, on='key')
assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on='key')
assert_frame_equal(result, expected)
@pytest.mark.parametrize('df1_vals, df2_vals', [
([0, 1, 2], ["0", "1", "2"]),
([0.0, 1.0, 2.0], ["0", "1", "2"]),
([0, 1, 2], [u"0", u"1", u"2"]),
(pd.date_range('1/1/2011', periods=2, freq='D'), ['2011-01-01',
'2011-01-02']),
(pd.date_range('1/1/2011', periods=2, freq='D'), [0, 1]),
(pd.date_range('1/1/2011', periods=2, freq='D'), [0.0, 1.0]),
(pd.date_range('20130101', periods=3),
pd.date_range('20130101', periods=3, tz='US/Eastern')),
([0, 1, 2], Series(['a', 'b', 'a']).astype('category')),
([0.0, 1.0, 2.0], Series(['a', 'b', 'a']).astype('category')),
# TODO ([0, 1], pd.Series([False, True], dtype=bool)),
([0, 1], pd.Series([False, True], dtype=object))
])
def test_merge_incompat_dtypes(self, df1_vals, df2_vals):
# GH 9780, GH 15800
# Raise a ValueError when a user tries to merge on
# dtypes that are incompatible (e.g., obj and int/float)
df1 = DataFrame({'A': df1_vals})
df2 = DataFrame({'A': df2_vals})
msg = ("You are trying to merge on {lk_dtype} and "
"{rk_dtype} columns. If you wish to proceed "
"you should use pd.concat".format(lk_dtype=df1['A'].dtype,
rk_dtype=df2['A'].dtype))
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df1, df2, on=['A'])
# Check that error still raised when swapping order of dataframes
msg = ("You are trying to merge on {lk_dtype} and "
"{rk_dtype} columns. If you wish to proceed "
"you should use pd.concat".format(lk_dtype=df2['A'].dtype,
rk_dtype=df1['A'].dtype))
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df2, df1, on=['A'])
@pytest.fixture
def left():
np.random.seed(1234)
return DataFrame(
{'X': Series(np.random.choice(
['foo', 'bar'],
size=(10,))).astype(CDT(['foo', 'bar'])),
'Y': np.random.choice(['one', 'two', 'three'], size=(10,))})
@pytest.fixture
def right():
np.random.seed(1234)
return DataFrame(
{'X': Series(['foo', 'bar']).astype(CDT(['foo', 'bar'])),
'Z': [1, 2]})
class TestMergeCategorical(object):
def test_identical(self, left):
# merging on the same, should preserve dtypes
merged = pd.merge(left, left, on='X')
result = merged.dtypes.sort_index()
expected = Series([CategoricalDtype(),
np.dtype('O'),
np.dtype('O')],
index=['X', 'Y_x', 'Y_y'])
assert_series_equal(result, expected)
def test_basic(self, left, right):
# we have matching Categorical dtypes in X
# so should preserve the merged column
merged = pd.merge(left, right, on='X')
result = merged.dtypes.sort_index()
expected = Series([CategoricalDtype(),
np.dtype('O'),
np.dtype('int64')],
index=['X', 'Y', 'Z'])
assert_series_equal(result, expected)
def test_merge_categorical(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
# note that we propagate the category
# because we don't have any matching rows
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
expected['d'] = expected['d'].astype(CategoricalDtype(['null']))
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def tests_merge_categorical_unordered_equal(self):
# GH-19551
df1 = DataFrame({
'Foo': Categorical(['A', 'B', 'C'], categories=['A', 'B', 'C']),
'Left': ['A0', 'B0', 'C0'],
})
df2 = DataFrame({
'Foo': Categorical(['C', 'B', 'A'], categories=['C', 'B', 'A']),
'Right': ['C1', 'B1', 'A1'],
})
result = pd.merge(df1, df2, on=['Foo'])
expected = DataFrame({
'Foo': pd.Categorical(['A', 'B', 'C']),
'Left': ['A0', 'B0', 'C0'],
'Right': ['A1', 'B1', 'C1'],
})
assert_frame_equal(result, expected)
def test_other_columns(self, left, right):
# non-merge columns should preserve if possible
right = right.assign(Z=right.Z.astype('category'))
merged = pd.merge(left, right, on='X')
result = merged.dtypes.sort_index()
expected = Series([CategoricalDtype(),
np.dtype('O'),
CategoricalDtype()],
index=['X', 'Y', 'Z'])
assert_series_equal(result, expected)
# categories are preserved
assert left.X.values.is_dtype_equal(merged.X.values)
assert right.Z.values.is_dtype_equal(merged.Z.values)
@pytest.mark.parametrize(
'change', [lambda x: x,
lambda x: x.astype(CDT(['foo', 'bar', 'bah'])),
lambda x: x.astype(CDT(ordered=True))])
def test_dtype_on_merged_different(self, change, join_type, left, right):
# our merging columns, X now has 2 different dtypes
# so we must be object as a result
X = change(right.X.astype('object'))
right = right.assign(X=X)
assert is_categorical_dtype(left.X.values)
# assert not left.X.values.is_dtype_equal(right.X.values)
merged = pd.merge(left, right, on='X', how=join_type)
result = merged.dtypes.sort_index()
expected = Series([np.dtype('O'),
np.dtype('O'),
np.dtype('int64')],
index=['X', 'Y', 'Z'])
assert_series_equal(result, expected)
def test_self_join_multiple_categories(self):
# GH 16767
# non-duplicates should work with multiple categories
m = 5
df = pd.DataFrame({
'a': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] * m,
'b': ['t', 'w', 'x', 'y', 'z'] * 2 * m,
'c': [letter
for each in ['m', 'n', 'u', 'p', 'o']
for letter in [each] * 2 * m],
'd': [letter
for each in ['aa', 'bb', 'cc', 'dd', 'ee',
'ff', 'gg', 'hh', 'ii', 'jj']
for letter in [each] * m]})
# change them all to categorical variables
df = df.apply(lambda x: x.astype('category'))
# self-join should equal ourselves
result = pd.merge(df, df, on=list(df.columns))
assert_frame_equal(result, df)
def test_dtype_on_categorical_dates(self):
# GH 16900
# dates should not be coerced to ints
df = pd.DataFrame(
[[date(2001, 1, 1), 1.1],
[date(2001, 1, 2), 1.3]],
columns=['date', 'num2']
)
df['date'] = df['date'].astype('category')
df2 = pd.DataFrame(
[[date(2001, 1, 1), 1.3],
[date(2001, 1, 3), 1.4]],
columns=['date', 'num4']
)
df2['date'] = df2['date'].astype('category')
expected_outer = pd.DataFrame([
[pd.Timestamp('2001-01-01'), 1.1, 1.3],
[pd.Timestamp('2001-01-02'), 1.3, np.nan],
[pd.Timestamp('2001-01-03'), np.nan, 1.4]],
columns=['date', 'num2', 'num4']
)
result_outer = pd.merge(df, df2, how='outer', on=['date'])
assert_frame_equal(result_outer, expected_outer)
expected_inner = pd.DataFrame(
[[pd.Timestamp('2001-01-01'), 1.1, 1.3]],
columns=['date', 'num2', 'num4']
)
result_inner = pd.merge(df, df2, how='inner', on=['date'])
assert_frame_equal(result_inner, expected_inner)
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('category_column,categories,expected_categories',
[([False, True, True, False], [True, False],
[True, False]),
([2, 1, 1, 2], [1, 2], [1, 2]),
(['False', 'True', 'True', 'False'],
['True', 'False'], ['True', 'False'])])
def test_merging_with_bool_or_int_cateorical_column(self, category_column,
categories,
expected_categories,
ordered):
# GH 17187
# merging with a boolean/int categorical column
df1 = pd.DataFrame({'id': [1, 2, 3, 4],
'cat': category_column})
df1['cat'] = df1['cat'].astype(CDT(categories, ordered=ordered))
df2 = pd.DataFrame({'id': [2, 4], 'num': [1, 9]})
result = df1.merge(df2)
expected = pd.DataFrame({'id': [2, 4], 'cat': expected_categories,
'num': [1, 9]})
expected['cat'] = expected['cat'].astype(
CDT(categories, ordered=ordered))
assert_frame_equal(expected, result)
@pytest.fixture
def left_df():
return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right_df():
return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
class TestMergeOnIndexes(object):
@pytest.mark.parametrize(
"how, sort, expected",
[('inner', False, DataFrame({'a': [20, 10],
'b': [200, 100]},
index=[2, 1])),
('inner', True, DataFrame({'a': [10, 20],
'b': [100, 200]},
index=[1, 2])),
('left', False, DataFrame({'a': [20, 10, 0],
'b': [200, 100, np.nan]},
index=[2, 1, 0])),
('left', True, DataFrame({'a': [0, 10, 20],
'b': [np.nan, 100, 200]},
index=[0, 1, 2])),
('right', False, DataFrame({'a': [np.nan, 10, 20],
'b': [300, 100, 200]},
index=[3, 1, 2])),
('right', True, DataFrame({'a': [10, 20, np.nan],
'b': [100, 200, 300]},
index=[1, 2, 3])),
('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3])),
('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3]))])
def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
result = pd.merge(left_df, right_df,
left_index=True,
right_index=True,
how=how,
sort=sort)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'index', [
CategoricalIndex(['A', 'B'], categories=['A', 'B'], name='index_col'),
Float64Index([1.0, 2.0], name='index_col'),
Int64Index([1, 2], name='index_col'),
UInt64Index([1, 2], name='index_col'),
RangeIndex(start=0, stop=2, name='index_col'),
DatetimeIndex(["2018-01-01", "2018-01-02"], name='index_col'),
], ids=lambda x: type(x).__name__)
def test_merge_index_types(index):
# gh-20777
# assert key access is consistent across index types
left = DataFrame({"left_data": [1, 2]}, index=index)
right = DataFrame({"right_data": [1.0, 2.0]}, index=index)
result = left.merge(right, on=['index_col'])
expected = DataFrame(
OrderedDict([('left_data', [1, 2]), ('right_data', [1.0, 2.0])]),
index=index)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("on,left_on,right_on,left_index,right_index,nms,nm", [
(['outer', 'inner'], None, None, False, False, ['outer', 'inner'], 'B'),
(None, None, None, True, True, ['outer', 'inner'], 'B'),
(None, ['outer', 'inner'], None, False, True, None, 'B'),
(None, None, ['outer', 'inner'], True, False, None, 'B'),
(['outer', 'inner'], None, None, False, False, ['outer', 'inner'], None),
(None, None, None, True, True, ['outer', 'inner'], None),
(None, ['outer', 'inner'], None, False, True, None, None),
(None, None, ['outer', 'inner'], True, False, None, None)])
def test_merge_series(on, left_on, right_on, left_index, right_index, nms, nm):
# GH 21220
a = pd.DataFrame({"A": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product([['a', 'b'], [0, 1]],
names=['outer', 'inner']))
b = pd.Series([1, 2, 3, 4],
index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=['outer', 'inner']), name=nm)
expected = pd.DataFrame({"A": [2, 4], "B": [1, 3]},
index=pd.MultiIndex.from_product([['a', 'b'], [1]],
names=nms))
if nm is not None:
result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match='a Series without a name'):
result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index)
|
bsd-3-clause
|
sebhtml/assembly
|
lib/assembly/job.py
|
2
|
7536
|
import os
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy.numarray as na
import asmtypes
import shock
class ArastJob(dict):
"""
"""
def __init__(self, *args):
dict.__init__(self, *args)
self['pipelines'] = [] # List of ArastPipeline
self['out_contigs'] = []
self['out_scaffolds'] = []
self['logfiles'] = []
self['out_reports'] = []
self['out_results'] = []
def make_plots(self):
pass
def plot_ale(self):
a_scores = []
names = []
for pl in self['pipelines']:
try:
a_scores.append(pl['stats']['ale_score'])
names.append(pl['name'])
except:
pass
if len(a_scores) < 2:
print ('Not enough ALE scores')
return
## normalize scores
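        # linear min-max rescale of the raw ALE scores onto [new_min, new_max] = [5, 100]
        # so the relative bar heights stay readable regardless of the raw score range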
old_min = min(a_scores)
old_max = max(a_scores)
new_min = 5
new_max = 100
old_range = old_max - old_min
new_range = new_max - new_min
n_scores = []
for a in a_scores:
n = (((a - old_min) * new_range) / old_range) + new_min
n_scores.append(n)
xlocations = na.array(range(len(n_scores))) + 0.5
width = 0.5
fig = plt.figure()
plt.bar(xlocations, n_scores, width=width, linewidth=0, color='#CC99FF')
plt.xticks(xlocations + width/2, names)
plt.xlim(0, xlocations[-1]+width*2)
plt.title("Relative ALE Scores")
plt.yticks(range(0, new_max + 10, 10))
ale_fig = os.path.join(self['datapath'], str(self['job_id']), 'ale.png')
plt.savefig(ale_fig)
return ale_fig
def export(self):
pass
def import_quast(self, qreport):
if self['reference']:
n50_line = 14
else:
n50_line = 12
f = open(qreport)
for i in range(n50_line):
line = f.readline()
n50_scores = [int(x) for x in re.split('\s*', line)[1:-1]]
if len(n50_scores) == len(self['pipelines']):
for i,pipe in enumerate(self['pipelines']):
pipe['stats']['N50'] = n50_scores[i]
def add_pipeline(self, num, modules):
""" MODULES is a list or dict """
pipeline = ArastPipeline({'number': num})
if type(modules) is list:
for i, module in enumerate(modules):
new_module = ArastModule({'number': i+1,
'module': module})
pipeline['modules'].append(new_module)
self['pipelines'].append(pipeline)
return pipeline
def get_pipeline(self, number):
for pipeline in self['pipelines']:
if pipeline['number'] == number:
return pipeline
def add_results(self, filesets):
if filesets:
if not type(filesets) is list:
filesets = [filesets]
for f in filesets:
self['out_results'].append(f)
@property
def results(self):
"""Return all output FileSets"""
return self['out_results']
def get_all_ftypes(self):
ft = []
for fileset in self.get_all_filesets():
for fileinfo in fileset['file_infos']:
ft.append((fileinfo['local_file'], fileset['type']))
return ft
def upload_results(self, url, token):
""" Renames and uploads all filesets and updates shock info """
new_sets = []
rank = 1
for i,fset in enumerate(self.results):
if fset.type == 'contigs' or fset.type == 'scaffolds':
fset.add_tag('rank-' + str(rank))
rank += 1
new_files = []
for j, f in enumerate(fset['file_infos']):
if len(fset['file_infos']) > 1:
file_suffix = '_{}'.format(j+1)
else: file_suffix = ''
ext = f['local_file'].split('.')[-1]
if not f['keep_name']:
new_file = '{}/{}.{}{}.{}'.format(os.path.dirname(f['local_file']),
i+1, fset.name, file_suffix, ext)
os.symlink(f['local_file'], new_file)
else: new_file = f['local_file']
res = self.upload_file(url, self['user'], token, new_file, filetype=fset.type)
f.update({'shock_url': url, 'shock_id': res['data']['id'],
'filename': os.path.basename(new_file)})
new_files.append(f)
fset.update_fileinfo(new_files)
new_sets.append(fset)
self['result_data'] = new_sets
return new_sets
def upload_file(self, url, user, token, file, filetype='default'):
files = {}
files["file"] = (os.path.basename(file), open(file, 'rb'))
sclient = shock.Shock(url, user, token)
res = sclient.upload_file(file, filetype, curl=True)
return res
def wasp_data(self):
"""
Compatibility layer for wasp data types.
Scans self for certain data types and populates a FileSetContainer
"""
all_sets = []
#### Convert Old Reads Format to ReadSets
for set_type in ['reads', 'reference']:
if set_type in self:
for fs in self[set_type]:
### Get supported set attributes (ins, std, etc)
kwargs = {}
for key in ['insert', 'stdev']:
if key in fs:
kwargs[key] = fs[key]
all_sets.append(asmtypes.set_factory(fs['type'],
[asmtypes.FileInfo(f) for f in fs['files']],
**kwargs))
#### Convert final_contigs from pipeline mode
if 'final_contigs' in self:
if self['final_contigs']: ## Not empty
## Remove left over contigs
del(self['contigs'])
for contig_data in self['final_contigs']:
all_sets.append(asmtypes.set_factory('contigs',
[asmtypes.FileInfo(fs,) for fs in contig_data['files']],
#{'name':contig_data['name']}))
name=contig_data['name']))
#### Convert Contig/Ref format
# for set_type in ['contigs', 'reference']:
# if set_type in self:
# all_sets.append(asmtypes.set_factory(set_type, [asmtypes.FileInfo(fs) for fs in self[set_type]]))
return asmtypes.FileSetContainer(all_sets)
class ArastPipeline(dict):
""" Pipeline object """
def __init__(self, *args):
dict.__init__(self, *args)
self['modules'] = []
self['stats'] = {}
def get_module(self, number):
for module in self['modules']:
if module['number'] == number:
return module
def import_reapr(self):
pass
def import_ale(self, ale_report):
f = open(ale_report)
line = f.readline()
self['stats']['ale_score'] = float(line.split(' ')[2])
f.close()
class ArastModule(dict):
def __init__(self, *args):
dict.__init__(self, *args)
|
mit
|
arokem/scipy
|
scipy/interpolate/ndgriddata.py
|
2
|
7566
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbor interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(x, y)
Nearest-neighbor interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = np.asarray(y)
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
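# Illustrative usage of NearestNDInterpolator (random data, values are arbitrary):
#   >>> pts = np.random.rand(20, 2)
#   >>> vals = np.sin(pts[:, 0])
#   >>> interp = NearestNDInterpolator(pts, vals)
#   >>> interp(np.array([[0.5, 0.5]]))   # value at the data point nearest (0.5, 0.5)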
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-D data.
Parameters
----------
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
Data point coordinates.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to N-D
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
|
bsd-3-clause
|
mmmatthew/raycast
|
code/s08__cast_rays_3d.py
|
1
|
5904
|
"""
Reads:
- 2D prediction clusters from previous step
- location: see output from previous step
- format: see output from previous step
- 3D mesh
- location: defined in settings file
- format: ply or STL
- library: VTK
- Camera calibration parameters
Tasks:
- For each prediction cluster in each image
- Compute ray in 3D
- Intersect with mesh
- Extract first intersection point (or maybe all intersection points?)
Writes:
- 3D predictions
- Coordinates
- Standard deviation of cluster (assuming 2D normal distribution)
- Reference to image
- Number of hits
----------------
- location: [project home directory]/6_cast_rays_3d/
- format: TO BE DEFINED
Tools:
- [Pycaster for casting rays onto mesh]
- OR use the VTK library directly
"""
import os
from glob import glob
import vtk
import numpy as np
import pandas as pd
import helpers
import csv
def cast_rays_3d(config, debug, settings):
# Where to find the 2D clusters
cluster_folder = os.path.join(config['iteration_directory'],
settings['general']['iterations_structure']['detect'])
# Get camera calibration information
camera_params = helpers.read_camera_params(
settings['inputs']['camera_xyz_offset'],
settings['inputs']['camera_params'])
# Load 3D mesh
print 'Loading 3D mesh...'
mesh = loadSTL(settings['inputs']['3dmesh'])
# Load mesh offset
with open(settings['inputs']['3dmesh_offset'], 'r') as offsetfile:
mesh_offset = np.array(map(float, offsetfile.readlines()[0].split()))
# build OBB tree for fast intersection search
print 'Building OBB tree...'
obbTree = vtk.vtkOBBTree()
obbTree.SetDataSet(mesh)
obbTree.BuildLocator()
for fold_i in range(settings['general']['do_folds']):
print('-- FOLD {} --'.format(fold_i))
# initialize result lists
r = {
'x': [],
'y': [],
'z': [],
'score': [],
'id': [],
'image': [],
'img_x': [],
'img_y': []
}
# compute 3d points
for cluster_list_file in glob(cluster_folder + '/fold_{}/*.csv'.format(fold_i)):
# read clusters
cluster_list = np.loadtxt(cluster_list_file, skiprows=1, delimiter=';',
ndmin=2)
# fetch parameters for image
this_cam_params = filter(lambda c: c['camera_name'].split('.')[0] == os.path.basename(cluster_list_file).split('.')[0], camera_params)[0]
# compute camera transforms
KRt = np.dot(np.dot(this_cam_params['K'], this_cam_params['R']), this_cam_params['t'])
KRinv = np.linalg.inv(np.dot(this_cam_params['K'], this_cam_params['R']))
# the zval is like an estimated elevation difference between camera and point
zval = 200
# intersection source (camera)
intersection_source = np.transpose(this_cam_params['t'])[0] - mesh_offset
# for each cluster
for cluster in cluster_list:
# Step 1: project to 3D space
X2d = np.array([
[cluster[0] * zval / float(settings['inputs']['image_pixel_x'])],
[cluster[1] * zval / float(settings['inputs']['image_pixel_y'])],
[zval]])
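                # back-project the detection at the assumed depth zval; with t used
                # as the camera position, X3d lies on the viewing ray from the
                # camera through the 2D cluster location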
X3d = np.dot(KRinv, (X2d + KRt))
# target (3D point) and intersections
intersection_target = np.transpose(X3d)[0] - mesh_offset
pointsVTKintersection = vtk.vtkPoints()
# Step 2: intersect line with surface and retain first
code = obbTree.IntersectWithLine(intersection_source, intersection_target, pointsVTKintersection, None)
# code is non-zero if there was an intersection
if code != 0:
pointsVTKIntersectionData = pointsVTKintersection.GetData()
noPointsVTKIntersection = pointsVTKIntersectionData.GetNumberOfTuples()
# retain the first intersect
(x, y, z) = pointsVTKIntersectionData.GetTuple3(0)
# append results to list
r['x'].append(x + mesh_offset[0])
r['y'].append(y + mesh_offset[1])
r['z'].append(z + mesh_offset[2])
r['score'].append(cluster[2]) # the score of the cluster
r['id'].append(int(cluster[3])) # the id of the cluster
r['image'].append(os.path.basename(cluster_list_file).split('.')[0]) # the image in which the cluster was detected
r['img_x'].append(int(X2d[0][0] / zval)) # image coordinates
r['img_y'].append(int(X2d[1][0] / zval))
# write results to dataframe
# Where to save 3D points
output_file = os.path.join(config['iteration_directory'],
settings['general']['iterations_structure']['cast'],
'3dpoints_{}.csv'.format(fold_i))
pd.DataFrame({
'x': r['x'],
'y': r['y'],
'z': r['z'],
'score': r['score'],
'id': r['id'],
'image': r['image'],
'img_x': r['img_x'],
'img_y': r['img_y']
}).to_csv(output_file)
return 0
def loadSTL(filenameSTL):
readerSTL = vtk.vtkSTLReader()
readerSTL.SetFileName(filenameSTL)
# 'update' the reader i.e. read the .stl file
readerSTL.Update()
polydata = readerSTL.GetOutput()
# If there are no points in 'vtkPolyData' something went wrong
if polydata.GetNumberOfPoints() == 0:
raise ValueError(
"No point data could be loaded from '" + filenameSTL)
return polydata
|
apache-2.0
|
juhuntenburg/pipelines
|
src/clustering/clustering/secondlevelcluster.py
|
2
|
3009
|
import matplotlib
matplotlib.use('Agg')
import os
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from consensus import Consensus
from cluster import Cluster
from variables import analysis_subjects, analysis_sessions, workingdir, resultsdir, freesurferdir, hemispheres, similarity_types, cluster_types, n_clusters
def get_wf():
wf = pe.Workflow(name="main_workflow")
wf.base_dir = os.path.join(workingdir,"intercluster_analysis")
wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"
##Infosource##
subject_id_infosource = pe.Node(util.IdentityInterface(fields=['subject_id']), name="subject_id_infosource")
subject_id_infosource.iterables = ('subject_id', analysis_subjects)
session_infosource = pe.Node(util.IdentityInterface(fields=['session']), name="session_infosource")
session_infosource.iterables = ('session', analysis_sessions)
hemi_infosource = pe.Node(util.IdentityInterface(fields=['hemi']), name="hemi_infosource")
hemi_infosource.iterables = ('hemi', hemispheres)
sim_infosource = pe.Node(util.IdentityInterface(fields=['sim']), name="sim_infosource")
sim_infosource.iterables = ('sim', similarity_types)
cluster_infosource = pe.Node(util.IdentityInterface(fields=['cluster']), name="cluster_infosource")
cluster_infosource.iterables = ('cluster', cluster_types)
n_clusters_infosource = pe.Node(util.IdentityInterface(fields=['n_clusters']), name="n_clusters_infosource")
n_clusters_infosource.iterables = ('n_clusters', n_clusters)
##Datagrabber for cluster_type##
dg_clusters = pe.Node(nio.DataGrabber(infields=['subject_id','session','hemi'], outfields=['all_cluster_types']), name="dg_clusters")
dg_clusters.inputs.base_directory = resultsdir+'clustered/'
dg_clusters.inputs.template = '*%s*/*%s*/*%s*/*%s*/*%s*/*%s*/*'
dg_clusters.inputs.template_args['all_cluster_types'] = [['hemi', 'session','subject_id','*','*','n_clusters']]
dg_clusters.inputs.sort_filelist = True
wf.connect(subject_id_infosource, 'subject_id', dg_clusters, 'subject_id')
wf.connect(session_infosource, 'session', dg_clusters, 'session')
wf.connect(hemi_infosource, 'hemi', dg_clusters, 'hemi')
wf.connect(n_clusters_infosource, 'n_clusters', dg_clusters, 'n_clusters')
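    ##Build the Consensus Matrix across the gathered cluster solutions##
    # NOTE: this node is needed by the connection below but was missing here;
    # the 'in_Files' input name is an assumption -- check the Consensus
    # interface in consensus.py for the actual trait name.
    intercluster = pe.Node(Consensus(), name='intercluster')
    wf.connect(dg_clusters, 'all_cluster_types', intercluster, 'in_Files')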
##Cluster the Consensus Matrix##
consensus_cluster = pe.Node(Cluster(), name = 'consensus')
wf.connect(intercluster, 'consensus_mat', consensus_cluster, 'in_File')
##Datasink
ds = pe.Node(nio.DataSink(), name="datasink")
ds.inputs.base_directory = resultsdir
wf.connect(consensus_cluster, 'out_File', ds, 'consensus_clustered')
wf.write_graph()
return wf
if __name__ == '__main__':
wf = get_wf()
#wf.run(plugin="CondorDAGMan", plugin_args={"template":"universe = vanilla\nnotification = Error\ngetenv = true\nrequest_memory=4000"})
wf.run(plugin="MultiProc", plugin_args={"n_procs":8})
|
mit
|
abidrahmank/MyRoughWork
|
blog_works/hist_backprojection_implementation.py
|
1
|
1091
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
#roi is the object or region of object we need to find
roi = cv2.imread('rose_red.png')
hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
#target is the image we search in
target = cv2.imread('rose.png')
hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
M = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
I = cv2.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )
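# R is the ratio histogram: for each (hue, saturation) bin it measures how much
# more the bin occurs in the ROI than in the target image (+1 avoids divide-by-zero)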
R = M/(I+1)
print R.max()
#cv2.normalize(prob,prob,0,255,cv2.NORM_MINMAX,0)
h,s,v = cv2.split(hsvt)
B = R[h.ravel(),s.ravel()]
B = np.minimum(B,1)
B = B.reshape(hsvt.shape[:2])
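# smooth the per-pixel backprojection with a disc kernel, then threshold it to get a mask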
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(B,-1,disc,B)
B = np.uint8(B)
cv2.normalize(B,B,0,255,cv2.NORM_MINMAX)
ret,thresh = cv2.threshold(B,50,255,0)
res = cv2.bitwise_and(target,target,mask = thresh)
cv2.imshow('nice',res)
cv2.imshow('img',target)
res = np.vstack((target,cv2.merge((B,B,B)),res))
#cv2.imwrite('thresh.png',thresh)
cv2.imwrite('output.png',res)
cv2.waitKey(0)
cv2.destroyAllWindows()
##plt.imshow(B)
##plt.show()
|
mit
|
phobson/statsmodels
|
statsmodels/graphics/tests/test_correlation.py
|
31
|
1112
|
import numpy as np
from numpy.testing import dec
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
from statsmodels.datasets import randhie
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_corr():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr(corr_matrix, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, normcolor=True, title='', cmap='jet')
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_corr_grid():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr_grid([corr_matrix] * 2, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 5, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 3, normcolor=True, titles='', cmap='jet')
plt.close(fig)
|
bsd-3-clause
|
cherusk/lindwurm
|
illustrator/cohesion_construer.py
|
1
|
7062
|
# lindwurm
#Copyright (C) 2016 Matthias Tafelmeier
#lindwurm is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#lindwurm is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#from illustrator.illustrator_core import Construer
import json
from colorama import Fore, Back, Style
import networkx as nx
import matplotlib.pyplot as plt
from collections import Callable
from sets import Set
from asciitree import LeftAligned
from collections import OrderedDict as OD
from asciitree.drawing import BoxStyle, BOX_HEAVY
class Construer:
"""
Core Construer Iface
"""
def __init__(self, aggreg_run_data, args):
pass
def do_graphical(self):
raise NotImplementedError
def do_term(self, args):
raise NotImplementedError
@staticmethod
def traverse(obj, callback=None, **ctx):
cb_res = callback(obj, **ctx)
if cb_res == "recurse":
pass
else:
return obj
if isinstance(obj, dict):
{k: Construer.traverse(v, callback, **ctx)
for k, v in obj.items()}
elif isinstance(obj, list):
[Construer.traverse(elem, callback, **ctx)
for elem in obj]
else:
return obj
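# Callbacks passed to Construer.traverse follow a small protocol: return the
# string "recurse" to keep descending into a node, anything else to stop there.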
class TermTreeConvCb(Callable):
def __init__(self):
pass
def __call__(self, obj):
progress = self.predicate(obj)
if 'convers' == progress:
new_form = { 'connected' : [], 'disjoined' : [] }
for conn, disj in map(None, obj['connected'], obj['disjoined']):
if conn:
new_form['connected'].append( tuple( [ conn, {} ] ) )
if disj:
new_form['disjoined'].append( tuple( [ disj, {} ] ) )
obj['connected'] = OD( new_form['connected'] )
obj['disjoined'] = OD( new_form['disjoined'] )
del obj['srv']
progress = 'return'
return progress
def predicate(self, obj):
if isinstance(obj, dict) and 'srv' in obj.keys():
return 'convers'
else:
return 'recurse'
class CohesionTermCb(Callable):
def __init__(self):
self.data_refining = { 'scaffolding' : { } }
self.service_fmt = "* srv: %s (%s)"
self.curr_host = None
self.curr_conn = Set([])
self.curr_disj = Set([])
def __call__(self, obj, **ctx):
progress = self.predicate(obj)
if progress == "service":
scaff_node = ctx['scaff_node']
self.ensure_def_scaff_n(scaff_node)
if obj["@portid"] not in self.data_refining['scaffolding'][scaff_node]:
srv = obj["service"]
self.form_refined_entry(srv, obj["@portid"], scaff_node)
if obj["state"]["@state"] == "open":
self.curr_conn.add(obj["@portid"])
else:
self.curr_disj.add(obj["@portid"])
# ugily depending on order
#in next host: so replenish cached conn/host data
self.replenish(scaff_node)
elif progress == "host":
hostname = obj["hostname"]
if isinstance(hostname, list):
for variant in hostname:
if variant["@type"] == "user":
self.curr_host = variant["@name"]
elif isinstance(hostname, dict):
self.curr_host = hostname["@name"]
return progress
# TODO consider Lists
@staticmethod
def append_str(str1, str2):
fmt = "%s %s"
str1 = fmt % (str1, str2)
return str1
def ensure_def_scaff_n(self, scaff_node):
if scaff_node not in self.data_refining['scaffolding'].keys():
self.data_refining['scaffolding'][scaff_node] = { }
def replenish(self, scaff_node):
# map since no zip_longest
curr_scaff_n = self.data_refining['scaffolding'][scaff_node]
for port_disj, port_con in map(None, self.curr_disj, self.curr_conn):
if port_con:
curr_scaff_n[str(port_con)]["connected"].append(self.curr_host)
if port_disj:
curr_scaff_n[str(port_disj)]["disjoined"].append(self.curr_host)
self.curr_conn = Set([])
self.curr_disj = Set([])
def predicate(self, obj):
if isinstance(obj, dict) and "@portid" in obj.keys():
return "service"
elif isinstance(obj, dict) and "hostname" in obj.keys():
return "host"
else:
return "recurse"
def form_refined_entry(self, srv, p_id, scaff_node):
new_entry = { "srv" : srv, "connected" : [], "disjoined" : [] }
self.data_refining['scaffolding'][scaff_node][p_id] = new_entry
def show(self, what):
print self.data_refining
if len(what) == 0 or 'Plain' in what:
scaffold_nodes = self.data_refining['scaffolding']
for scaffold_node, ports in scaffold_nodes.items():
print ( Fore.BLUE + "--- scaffold_node: %s" % (scaffold_node))
print(Style.RESET_ALL)
for p_id, ctx in ports.items():
print self.service_fmt % ( ctx["srv"]['@name'], p_id)
for key in [ 'connected', 'disjoined' ]:
print CohesionTermCb.append_str(key, " ".join(ctx[key]))
print "\n"
# JSON
if 'Json' in what:
print 'JSON \n'
print json.dumps(self.data_refining)
# maybe deep copy if data reused
if 'Tree' in what:
# TREE
Construer.traverse(self.data_refining, TermTreeConvCb())
tr = LeftAligned(draw=BoxStyle(gfx=BOX_HEAVY))
print(tr(self.data_refining))
class Cohesion(Construer):
"""
Depicting insights regarding distributed nodes cohesion
on several net stack layers
"""
def __init__(self, aggreg_run_data):
self.run_data = aggreg_run_data
print aggreg_run_data
def do_graphical(self):
#G=nx.Graph([(0,1),(1,2),(2,3)])
#nx.draw(G)
#plt.savefig(".png")
raise NotImplementedError
def do_term(self, args):
inter_data = json.loads(self.run_data)
run = inter_data["run"]
print (">>objective: [%s]" % (args.curr_subcmd))
cb = CohesionTermCb()
for scaffold_node in run["nmap"]:
Construer.traverse(run['nmap'][scaffold_node], cb, scaff_node=scaffold_node)
cb.show(args.format)
|
gpl-3.0
|
ARM-software/trappy
|
trappy/stats/Indexer.py
|
1
|
3165
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Indexers are responsible for providing indexes for
aggregations and provide specific functions like
unification and resampling.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import object
import pandas as pd
import numpy as np
from trappy.utils import listify
from trappy.stats import StatConf
class Indexer(object):
"""Indexer base class is an encapsulation
around the pandas Index object with some
special functionality
:param index: Pandas index object. This can be
        non-uniform and non-unique
:type index: :mod:`pandas.Index`
:param traces: trappy FTrace list/singular object
:type traces: :mod:`trappy.trace.FTrace`
"""
def __init__(self, index):
self.index = index
def series(self):
"""Returns an empty series with the initialized index
"""
return pd.Series(np.zeros(len(self.index)), index=self.index)
def get_uniform(self, delta=StatConf.DELTA_DEFAULT):
"""
:param delta: Difference between two indices. This has a
default value specified in StatConf.DELTA_DEFAULT
:type delta: float
:return: A uniformly spaced index.
"""
uniform_start = self.index.values[0]
uniform_end = self.index.values[-1]
new_index = np.arange(uniform_start, uniform_end, delta)
return new_index
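    # e.g. Indexer(pd.Index([0.0, 0.7, 1.5])).get_uniform(delta=0.5)
    # -> array([0. , 0.5, 1. ]) via np.arange(first, last, delta)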
def get_unified_indexer(indexers):
"""Unify the List of Indexers
:param indexers: A list of indexers
:type indexers: :mod:`trappy.stats.Indexer.Indexer`
:return: A :mod:`pandas.Indexer.Indexer`
    with a unified index
"""
new_index = indexers[0].index
for idx in indexers[1:]:
new_index = new_index.union(idx.index)
return Indexer(new_index)
class MultiTriggerIndexer(Indexer):
""""The index unifies the indices of all trigger
events.
:param triggers: A (list or single) trigger
:type triggers: :mod:`trappy.stats.Trigger.Trigger`
"""
def __init__(self, triggers):
self._triggers = listify(triggers)
super(MultiTriggerIndexer, self).__init__(self._unify())
def _unify(self):
"""Function to unify all the indices of each trigger
"""
idx = pd.Index([])
for trigger in self._triggers:
trace = trigger.trace
trappy_event = getattr(trace, trigger.template.name)
idx = idx.union(trappy_event.data_frame.index)
return pd.Index(np.unique(idx.values))
|
apache-2.0
|
ContextLab/quail
|
tests/test_basic.py
|
1
|
2765
|
# -*- coding: utf-8 -*-
from quail.analysis.analysis import analyze
from quail.load import load_example_data
from quail.egg import Egg
import numpy as np
import pytest
import pandas as pd
presented=[[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]]
recalled=[[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]]
egg = Egg(pres=presented,rec=recalled)
def test_analysis_acc():
print(analyze(egg, analysis='accuracy'))
print([np.array([1.]),np.array([.75])])
assert np.array_equal(analyze(egg, analysis='accuracy').data.values,[np.array([1.]),np.array([.75])])
def test_analysis_spc():
assert np.array_equal(analyze(egg, analysis='spc').data.values,[np.array([ 1., 1., 1., 1.]),np.array([ 1., 1., 0., 1.])])
def test_analysis_spc_listgroup():
assert np.array_equal(analyze(egg, listgroup=[1,1], listname='Frank', analysis='spc').data.values,np.array([[ 1. , 1. , 0.5, 1. ]]))
def test_analysis_pfr():
assert np.array_equal(analyze(egg, analysis='pfr').data.values,[np.array([ 0., 1., 0., 0.]), np.array([ 0., 1., 0., 0.])])
def test_analysis_pfr_listgroup():
assert np.array_equal(analyze(egg, listgroup=['one','one'], analysis='pfr').data.values,np.array([[ 0., 1., 0., 0.]]))
def test_analysis_lag_crp():
# example from kahana lab lag-crp tutorial
presented=[[['1', '2', '3', '4', '5', '6', '7', '8']]]
recalled=[[['8', '7', '1', '2', '3', '5', '6', '4']]]
egg = Egg(pres=presented,rec=recalled)
assert np.allclose(analyze(egg, analysis='lagcrp').data.values,np.array([[0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.333333, 0.333333, np.nan, 0.75, 0.333333, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]), equal_nan=True)
# MULTI SUBJECT
presented=[[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']],[['cat', 'bat', 'hat', 'goat'],['zoo', 'animal', 'zebra', 'horse']]]
recalled=[[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']],[['bat', 'cat', 'goat', 'hat'],['animal', 'horse', 'zoo']]]
multisubj_egg = Egg(pres=presented,rec=recalled)
def test_analysis_acc_multisubj():
assert np.array_equal(analyze(multisubj_egg, analysis='accuracy').data.values,np.array([[ 1.],[ .75],[ 1.],[ .75]]))
def test_analysis_spc_multisubj():
assert np.array_equal(analyze(multisubj_egg, analysis='spc').data.values,np.array([[ 1., 1., 1., 1.],[ 1., 1., 0., 1.],[ 1., 1., 1., 1.],[ 1., 1., 0., 1.]]))
def test_egg():
list1 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
list2 = [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]
egg = Egg(pres = list1, rec = list2)
assert type(egg.pres) == pd.core.frame.DataFrame
assert type(egg.rec) == pd.core.frame.DataFrame
def test_load_example_data():
egg = load_example_data()
assert isinstance(egg, Egg)
|
mit
|
surmeierlab/neurphys
|
neurphys/read_abf.py
|
1
|
4466
|
"""
Functions to import and manipulate Axon Binary Files.
"""
from collections import OrderedDict
from neo import io
import pandas as pd
import numpy as np
def _all_ints(ii):
""" Determines if list or tuples contains only integers """
return all(isinstance(i, int) for i in ii)
def _all_strs(ii):
""" Determines if list or tuples contains only strings """
return all(isinstance(i, str) for i in ii)
def read_abf(filepath):
"""
Imports ABF file using neo io AxonIO, breaks it down by blocks
which are then processed into a multidimensional pandas dataframe
where each block corresponds to a sweep and columns represent time
and each recorded channel.
Parameters
----------
    filepath: str
Full filepath WITH '.abf' extension.
Return
------
df: DataFrame
Pandas DataFrame broken down by sweep.
References
----------
[1] https://neo.readthedocs.org/en/latest/index.html
"""
r = io.AxonIO(filename=filepath)
bl = r.read_block(lazy=False, cascade=True)
num_channels = len(bl.segments[0].analogsignals)
df_list = []
sweep_list = []
units = ['s']
for seg_num, seg in enumerate(bl.segments):
channels = ['primary']+['channel_{0}'.format(str(i+1))
for i in range(num_channels-1)]
signals = []
for i in range(num_channels):
data = np.array(bl.segments[seg_num].analogsignals[i].data)
signals.append(data.T[0])
unit = bl.segments[seg_num].analogsignals[i].units
units.append(str(unit).split()[-1])
data_dict = OrderedDict(zip(channels, signals))
time = seg.analogsignals[0].times - seg.analogsignals[0].times[0]
data_dict.update({'time': time})
data_dict.move_to_end('time', last=False)
df = pd.DataFrame(data_dict)
df_list.append(df)
sweep_list.append('sweep' + str(seg_num + 1).zfill(3))
df = pd.concat(df_list, keys=sweep_list, names=['sweep', 'index'])
df.channel_units = units
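    # illustrative usage (the path below is hypothetical):
    #   df = read_abf('/data/cell01_0001.abf')
    #   sweep1 = df.loc['sweep001']   # columns: time, primary, channel_1, ...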
return df
def keep_sweeps(df, sweep_list):
"""
Keeps specified sweeps from your DataFrame.
Parameters
----------
df: Pandas DataFrame
Dataframe created using one of the functions from Neurphys.
sweep_list: 1D array_like of ints or properly formatted strings
        List containing numbers of the sweeps you'd like to keep in the
        DataFrame. Example: [1,4,6] or ['sweep001', 'sweep004', 'sweep006']
Return
------
keep_df: Pandas Dataframe
Dataframe containing only the sweeps you want to keep.
Notes
-----
Some type checks are made, but not enough to cover the plethora of
potential inputs, so read the docs if you're having trouble.
"""
if _all_ints(sweep_list):
keep_sweeps = [('sweep'+str(i).zfill(3)) for i in sweep_list]
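        # e.g. [1, 4, 6] -> ['sweep001', 'sweep004', 'sweep006']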
elif _all_strs(sweep_list):
keep_sweeps = sweep_list
else:
raise TypeError(
'List should either be appropriate sweep names or integers')
keep_df = df.loc[keep_sweeps]
return keep_df
def drop_sweeps(df, sweep_list):
"""
Removes specified sweeps from your DataFrame.
Parameters
----------
df: Pandas DataFrame
Dataframe created using one of the functions from Neurphys. It must
be multiindexed for the function to work properly.
sweep_list: 1D array_like of ints or properly formatted strings
List containing numbers of the sweeps you'd like dropped from the
DataFrame. Example: [1,4,6] or ['sweep001', 'sweep004', 'sweep006']
Return
------
drop_df: Pandas Dataframe
Dataframe containing only the sweeps you want to keep.
Notes
-----
Making the grand assumption that the df.index.level[0]=='sweeps'
"""
if _all_ints(sweep_list):
drop_sweeps = [('sweep'+str(i).zfill(3)) for i in sweep_list]
elif _all_strs(sweep_list):
drop_sweeps = sweep_list
else:
raise TypeError(
'List should either be appropriate sweep names or integers')
all_sweeps = df.index.levels[0].values
if drop_sweeps[0] in all_sweeps:
pass
else:
raise KeyError('Cannot index a multi-index axis with these keys')
# keep_sweeps = list(set(all_sweeps).difference(drop_sweeps))
keep_sweeps = list(set(all_sweeps) ^ set(drop_sweeps))
drop_df = df.loc[keep_sweeps]
return drop_df
|
gpl-3.0
|
nmartensen/pandas
|
asv_bench/benchmarks/reshape.py
|
7
|
4225
|
from .pandas_vb_common import *
from pandas import melt, wide_to_long
class melt_dataframe(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
self.df['id1'] = np.random.randint(0, 10, 10000)
self.df['id2'] = np.random.randint(100, 1000, 10000)
def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
class reshape_pivot_time_series(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
self.index = date_range('1/1/2000', periods=10000, freq='h')
self.df = DataFrame(randn(10000, 50), index=self.index, columns=range(50))
self.pdf = self.unpivot(self.df)
self.f = (lambda : self.pdf.pivot('date', 'variable', 'value'))
def time_reshape_pivot_time_series(self):
self.f()
def unpivot(self, frame):
(N, K) = frame.shape
self.data = {'value': frame.values.ravel('F'), 'variable': np.asarray(frame.columns).repeat(N), 'date': np.tile(np.asarray(frame.index), K), }
return DataFrame(self.data, columns=['date', 'variable', 'value'])
class reshape_stack_simple(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
self.udf = self.df.unstack(1)
def time_reshape_stack_simple(self):
self.udf.stack()
class reshape_unstack_simple(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
def time_reshape_unstack_simple(self):
self.df.unstack(1)
class reshape_unstack_large_single_dtype(object):
goal_time = 0.2
def setup(self):
m = 100
n = 1000
levels = np.arange(m)
index = pd.MultiIndex.from_product([levels]*2)
columns = np.arange(n)
values = np.arange(m*m*n).reshape(m*m, n)
self.df = pd.DataFrame(values, index, columns)
self.df2 = self.df.iloc[:-1]
def time_unstack_full_product(self):
self.df.unstack()
def time_unstack_with_mask(self):
self.df2.unstack()
class unstack_sparse_keyspace(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)])
self.df = DataFrame(np.random.randn(10000, 4), index=self.index)
self.NUM_ROWS = 1000
for iter in range(10):
self.df = DataFrame({'A': np.random.randint(50, size=self.NUM_ROWS), 'B': np.random.randint(50, size=self.NUM_ROWS), 'C': np.random.randint((-10), 10, size=self.NUM_ROWS), 'D': np.random.randint((-10), 10, size=self.NUM_ROWS), 'E': np.random.randint(10, size=self.NUM_ROWS), 'F': np.random.randn(self.NUM_ROWS), })
self.idf = self.df.set_index(['A', 'B', 'C', 'D', 'E'])
if (len(self.idf.index.unique()) == self.NUM_ROWS):
break
def time_unstack_sparse_keyspace(self):
self.idf.unstack()
class wide_to_long_big(object):
goal_time = 0.2
def setup(self):
vars = 'ABCD'
nyrs = 20
nidvars = 20
N = 5000
yrvars = []
for var in vars:
for yr in range(1, nyrs + 1):
yrvars.append(var + str(yr))
self.df = pd.DataFrame(np.random.randn(N, nidvars + len(yrvars)),
columns=list(range(nidvars)) + yrvars)
self.vars = vars
def time_wide_to_long_big(self):
self.df['id'] = self.df.index
wide_to_long(self.df, list(self.vars), i='id', j='year')
|
bsd-3-clause
|
ajijohn/brainprinter
|
server/server_df.py
|
1
|
3323
|
import requests
import pandas as pd
#################################
# Table format (CSV):
# | customerName | customerEmail | inputfile | status |
# The 'status' column drives the processing loop below.
#################################
def read_table(table_filename):
try:
        df = pd.read_csv(table_filename)
        return df
    except FileNotFoundError:
        print("Could not find table.")
        return None
def process_request(request):
""" This requests a row from the table.
Usage:
process_request(df[row_number])
"""
print("Processing request for "+request['customerName']+'.')
return(True)
def send_message(customer_name,customer_email):
return requests.post(
"https://api.mailgun.net/v3/sandbox8a8eabd810b540f5a7eca93aecbec851.mailgun.org/messages",
auth=("api","key-22e20caa980e3fd1835b1105d6ea5e29"),
# files=[("attachment", open("rh.stl","rb")),("inline",open("rh.gif","rb"))],
files=[("attachment", open("rh_decim.stl.stl","rb")),("inline",open("rh.gif","rb"))],
data={"from": "Brain Printing Service <[email protected]>",
"to": [customer_email],
"subject": "Your Brain Is On Its Way",
"text": "Congratulations!!! You are one step way from your printed brain. Just bring the attached .stl file to the printer."}
#headers={'Content-type': 'multipart/form-data'}
)
if __name__ == "__main__":
# from crontab import CronTab
# cron = CronTab(tabfile='/etc/crontab')
# job = cron.new(command='/usr/bin/echo')
# job.minute.during(5,50).every(5)
import threading
import time
#TODO use requests table
table_name = "database.csv"
def ping_table():
#TODO avoid reading whole table
df = pd.read_csv(table_name)
# apply approach
# new_requests = df.loc[df["status"] == 'upload_finished']
# if len(new_requests)>0:
# new_requests.apply(process_request, axis=1)
# loop approach
recordsToProcess = df.index[df["status"] == 'upload_finished']
for id in recordsToProcess:
            df.loc[id, 'status'] = 'process_started'  # .loc[row, col] so the write hits df itself
df.to_csv(table_name,index=False) # Note:overwriting!!!
res = process_request(df.loc[id])
time.sleep(1)
if res:
                df.loc[id, 'status'] = 'process_ended'
df.to_csv(table_name,index=False) # Note: overwriting!!!
#TODO: modify csv in place!!!!!
recordsToSendEmail = df.index[df["status"] == 'process_ended']
for id in recordsToSendEmail:
            df.loc[id, 'status'] = 'email_started'
df.to_csv(table_name,index=False)
r = send_message(df.loc[id]['customerName'],df.loc[id]['customerEmail'])
print(r.status_code)
if r.status_code == 200:
                df.loc[id, 'status'] = 'email_sent'
df.to_csv(table_name,index=False)
# numline = len(file.readlines())
# file.close()
# rint(numline)
## assuming only one new record!!!
# if numline == old_numline:
# row = pd.read_csv(table_name,index_col=numline)
# process_request(row)
# old_numline = numline
while True:
ping_table()
time.sleep(5)
#threading.Timer(5, ping_table).start()
|
mit
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/backend_gtk3agg.py
|
8
|
3841
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
import sys
import warnings
from . import backend_agg
from . import backend_gtk3
from .backend_cairo import cairo, HAS_CAIRO_CFFI
from matplotlib.figure import Figure
from matplotlib import transforms
if six.PY3 and not HAS_CAIRO_CFFI:
warnings.warn(
"The Gtk3Agg backend is known to not work on Python 3.x with pycairo. "
"Try installing cairocffi.")
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
backend_agg.FigureCanvasAgg):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
self._bbox_queue = []
def _renderer_init(self):
pass
def _render_figure(self, width, height):
backend_agg.FigureCanvasAgg.draw(self)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
if not len(self._bbox_queue):
if self._need_redraw:
self._render_figure(w, h)
bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
else:
return
else:
bbox_queue = self._bbox_queue
if HAS_CAIRO_CFFI:
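            # re-wrap the pycairo context as a cairocffi Context: the raw
            # cairo_t* sits just past the PyObject header, so cast it out of
            # the Python object and take a new reference to it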
ctx = cairo.Context._from_pointer(
cairo.ffi.cast('cairo_t **',
id(ctx) + object.__basicsize__)[0],
incref=True)
for bbox in bbox_queue:
area = self.copy_from_bbox(bbox)
buf = np.fromstring(area.to_string_argb(), dtype='uint8')
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
if HAS_CAIRO_CFFI:
image = cairo.ImageSurface.create_for_data(
buf.data, cairo.FORMAT_ARGB32, width, height)
else:
image = cairo.ImageSurface.create_for_data(
buf, cairo.FORMAT_ARGB32, width, height)
ctx.set_source_surface(image, x, y)
ctx.paint()
if len(self._bbox_queue):
self._bbox_queue = []
return False
def blit(self, bbox=None):
# If bbox is None, blit the entire canvas to gtk. Otherwise
# blit only the area defined by the bbox.
if bbox is None:
bbox = self.figure.bbox
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
self._bbox_queue.append(bbox)
self.queue_draw_area(x, y, width, height)
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(backend_agg.FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Agg(figure)
manager = FigureManagerGTK3Agg(canvas, num)
return manager
FigureCanvas = FigureCanvasGTK3Agg
FigureManager = FigureManagerGTK3Agg
show = backend_gtk3.show
|
mit
|
larsmans/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
10
|
4382
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
 * **Avoid computing twice the same thing**: code is rerun over and
   over, for instance when prototyping computational-heavy jobs (as in
   scientific development), but hand-crafted solutions to alleviate this
   issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
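   A minimal, hedged sketch of the dump/load round trip (the target path
   below is an illustrative assumption, not a documented example)::

    >>> from sklearn.externals.joblib import dump, load
    >>> dump(np.arange(5), '/tmp/joblib_demo.pkl')  # doctest: +SKIP
    ['/tmp/joblib_demo.pkl']
    >>> load('/tmp/joblib_demo.pkl')  # doctest: +SKIP
    array([0, 1, 2, 3, 4])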
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
grimy55/rsr
|
run.py
|
1
|
8795
|
"""
Wrappers for running RSR processing
"""
from . import fit
from .Classdef import Async
import numpy as np
import pandas as pd
import time
from sklearn.neighbors import KDTree
def timing(func):
"""Outputs the time a function takes to execute.
"""
def func_wrapper(*args, **kwargs):
t1 = time.time()
func(*args, **kwargs)
t2 = time.time()
print("- Processed in %.1f s.\n" % (t2-t1))
return func_wrapper
def scale(amp):
"""Provide a factor to scale a set of amplitudes between 0 and 1
for correct rsr processing through fit.lmfit
"""
y, x = np.histogram(np.abs(amp), bins='fd')
pik = x[y.argmax()]
out = 1/(pik*10)
return out
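# Hedged usage sketch for scale() (the Rayleigh-like sample below is an
# illustrative assumption): amplitudes whose histogram peaks near 0.01 get a
# factor of roughly 1/(0.01*10) = 10, bringing them close to unity for the fit.
#   amp = np.random.rayleigh(scale=0.01, size=10000)
#   factor = scale(amp)        # ~10 for this sample
#   amp_scaled = amp * factor  # ready for fit.lmfit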
def processor(amp, gain=0., bins='stone', fit_model='hk', scaling=True, **kwargs):
"""Apply RSR over a sample of amplitudes
Arguments
---------
amp: float array
Linear amplitudes
Keywords
--------
gain: float
Gain (in dB power) to add to the amplitudes
bins: string
Method to compute the bin width (inherited from numpy.histogram)
fit_model: string
Name of the function (in pdf module) to use for the fit
scaling: boolean
Whether to scale the amplitudes before processing.
That ensures a correct fit in case the amplitudes are << 1
output: string
Format of the output
Return
------
A Statfit class
"""
# Remove zero values
amp = amp[amp > 0]
# Gain and Scaling
amp = amp * 10**(gain/20.)
scale_amp = scale(amp) if scaling is True else 1
amp = amp*scale_amp
# Fit
a = fit.lmfit( np.abs(amp), bins=bins, fit_model=fit_model)
# Remove Scaling
pc = 10*np.log10( a.values['a']**2 ) - 20*np.log10(scale_amp)
pn = 10*np.log10( 2*a.values['s']**2 ) - 20*np.log10(scale_amp)
a.sample = amp/scale_amp
a.values['a'] = np.sqrt( 10**(pc/10.) )
a.values['s'] = np.sqrt( 10**(pn/10.)/2. )
# Output
if 'ID' in kwargs:
a.values['ID'] = kwargs['ID']
else:
a.values['ID'] = -1
return a
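# Hedged usage sketch for processor() (the synthetic amplitudes below are an
# assumption made for illustration; real data would be radar echo amplitudes):
#   amp = np.abs(np.random.normal(0.5, 0.1, 5000))
#   f = processor(amp, fit_model='hk', bins='stone', ID=0)
#   print(f.power(), f.crl())   # derived powers and fit correlation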
def cb_processor(a):
"""
Callback function for processor
Argument:
---------
a: class
Results from "processor" (Statfit class)
"""
p = a.power()
#print(p)
print("#%d\tCorrelation: %.3f\tPt: %.1f dB Pc: %.1f dB Pn: %.1f dB" %
(a.values['ID'], a.crl(), p['pt'], p['pc'], p['pn'] ) )
return a
def frames(x, winsize=1000., sampling=250, **kwargs):
"""
Defines along-track frames coordinates for rsr application
Arguments
---------
x: float array
vector index
Keywords
--------
winsize: int
Number of elements in a window
sampling: int
Sampling step
"""
# Window first and last id
xa = x[:np.int(x.size-winsize):np.int(sampling)]
xb = xa + winsize-1
# Cut last window in limb
if xb[-1] > x[-1]: xb[-1] = x[-1]
xo = [val+(xb[i]-val)/2. for i, val in enumerate(xa)]
# Output
out = [ np.array([xa[i], xb[i]]).astype('int64') for i in np.arange(xa.size) ]
out = {'xa':np.array(xa, dtype=np.int64),
'xb':np.array(xb, dtype=np.int64),
'xo':np.array(xo, dtype=np.float64),
}
return out
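# Illustrative sketch of what frames() returns (winsize=4 and sampling=2 are
# assumptions chosen so the numbers are easy to check by hand):
#   w = frames(np.arange(10), winsize=4, sampling=2)
#   # w['xa'] -> [0, 2, 4], w['xb'] -> [3, 5, 7], w['xo'] -> [1.5, 3.5, 5.5]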
#@timing
def along(amp, nbcores=1, verbose=True, **kwargs):
"""
RSR applied on windows sliding along a vector of amplitudes
Arguments
---------
amp: Float array
A vector of amplitudes
Keywords
--------
nbcores: int
number of cores
verbose: boolean
print results
Any keywords accepted by 'processor' and 'frames'
    Return
    ------
    out: pandas.DataFrame
        One row per window, with the fit parameters, derived powers,
        correlation coefficient, chi-square and window coordinates (xa, xb, xo)
    """
t1 = time.time()
#-----------
# Parameters
#-----------
# Windows along-track
x = np.arange( len(amp) ) #vector index
w = frames(x, **kwargs)
ID = np.arange(w['xa'].size)
# Jobs Definition
args, kwgs = [], []
for i in ID:
args.append( amp[w['xa'][i]: w['xb'][i]] )
#kwgs.append( dict(**kwargs, i=w['xo'][i]) )
#-----------
# Processing
#-----------
# Do NOT use the multiprocessing package
if nbcores== -1:
results = pd.DataFrame()
for i in ID:
a = processor(args[i], **kwargs, ID=w['xo'][i])
cb_processor(a)
b = {**a.values, **a.power(), 'crl':a.crl(), 'chisqr':a.chisqr,}
results = results.append(b, ignore_index=True)
out = results
# Do use the multiprocessing package
if nbcores > 0:
results = []
if verbose is True:
async_inline = Async(processor, cb_processor, nbcores=nbcores)
elif verbose is False:
async_inline = Async(processor, None, nbcores=nbcores)
for i in ID:
results.append( async_inline.call(args[i], **kwargs, ID=w['xo'][i]) )
async_inline.wait()
# Sorting Results
out = pd.DataFrame()
for i in results:
a = i.get()
b = {**a.values, **a.power(), 'crl':a.crl(), 'chisqr':a.chisqr,}
out = out.append(b, ignore_index=True)
out = out.sort_values('ID')
out['xa'] = w['xa']
out['xb'] = w['xb']
out['xo'] = w['xo']
out = out.drop('ID', 1)
t2 = time.time()
if verbose is True:
print("- Processed in %.1f s.\n" % (t2-t1))
return out
#@timing
def incircles(amp, amp_x, amp_y, circle_x, circle_y, circle_r, leaf_size=None,
nbcores=1, verbose=True, **kwargs):
"""
RSR applied over data within circles
Arguments
---------
amp: Float array
A vector of amplitudes
amp_x: Float array
X coordinates for amp
amp_y: Float array
Y coordinates for amp
circle_x: Float array
X coordinates for circles
circle_y: Float array
Y_coordinates for circles
circle_r: Float
Radius of the circles
Keywords
--------
leaf_size: Integer (Default: None)
Set the leaf size for the KD-Tree. Inherits from sklearn.
If None, use a brute force technique
nbcores: int
number of cores
verbose: boolean
print results
Any keywords accepted by 'processor' and 'frames'
    Return
    ------
    out: pandas.DataFrame
        One row per non-empty circle, with the fit parameters, derived powers,
        correlation coefficient and chi-square, sorted by circle index (ID)
    """
t1 = time.time()
#-----------
# Parameters
#-----------
# Coordinates units
#if deg is True:
# metrics = 'haversine'
# amp_x = np.deg2rad(amp_x)
# amp_y = np.deg2rad(amp_y)
# circle_x = np.deg2rad(circle_x)
# circle_y = np.deg2rad(circle_y)
# circle_r = np.deg2rad(circle_r)
#else:
# metrics = 'euclidian'
# KD-Tree
if leaf_size is None:
leaf_size = len(amp)
amp_xy = np.array(list(zip(amp_x, amp_y)))
tree = KDTree(amp_xy, leaf_size=leaf_size)
# Radius Query
circle_xy = np.array(list(zip(circle_x, circle_y)))
ind = tree.query_radius(circle_xy, r=circle_r)
# Jobs Definition
ID, args, kwgs = [], [], []
for i, data_index in enumerate(ind):
if data_index.size != 0:
data = np.take(amp, data_index)
args.append(data)
ID.append(i)
#kwgs.append( dict(**kwargs, i=w['xo'][i]) )
#-----------
# Processing
#-----------
# Do NOT use the multiprocessing package
if nbcores == -1:
results = pd.DataFrame()
for i, orig_i in enumerate(ID):
a = processor(args[i], **kwargs, ID=orig_i)
cb_processor(a)
b = {**a.values, **a.power(), 'crl':a.crl(), 'chisqr':a.chisqr,}
results = results.append(b, ignore_index=True)
out = results
# Do use the multiprocessing package
if nbcores > 0:
results = []
if verbose is True:
async_inline = Async(processor, cb_processor, nbcores=nbcores)
elif verbose is False:
async_inline = Async(processor, None, nbcores=nbcores)
for i, orig_i in enumerate(ID):
results.append( async_inline.call(args[i], **kwargs, ID=orig_i) )
async_inline.wait()
# Sorting Results
out = pd.DataFrame()
for i in results:
a = i.get()
b = {**a.values, **a.power(), 'crl':a.crl(), 'chisqr':a.chisqr,}
out = out.append(b, ignore_index=True)
out = out.sort_values('ID')
#out['xa'] = w['xa']
#out['xb'] = w['xb']
#out['xo'] = w['xo']
#out = out.drop('ID', 1)
t2 = time.time()
if verbose is True:
print("- Processed in %.1f s.\n" % (t2-t1))
return out
|
mit
|
michaelStettler/HISI
|
HISI/run_mnist_HISI.py
|
1
|
5512
|
import sys
import numpy as np
import matplotlib.pylab as plt
import scipy.misc
from HISI import *
import datetime
from multiprocessing import Process
# mnist
# data = np.load("Stimuli/mnist_test.npy")
data = np.load("Stimuli/mnist_test_4bar.npy")
# data = np.load("Stimuli/mnist_train_3bar.npy")
# data = np.load("Stimuli/mnist_test_0bar_baseline.npy")
# data = np.load("Stimuli/mnist_train_0bar_baseline.npy")
show = True
obstructed = True
test = True
save = True
single_thread = False
if len(sys.argv) > 1:
data = np.load("Stimuli/"+str(sys.argv[1])+".npy")
#argument order: show, obstructed, test, save, single_thread
# show
if sys.argv[2] == 'True':
show = True
else:
show = False
# obstructed
if sys.argv[3] == 'True':
obstructed = True
else:
obstructed = False
# test
if sys.argv[4] == 'True':
test = True
else:
test = False
# save
if sys.argv[5] == 'True':
save = True
else:
save = False
# single_thread
if sys.argv[6] == 'True':
single_thread = True
else:
single_thread = False
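# Hypothetical command-line invocation (file name and flag values are
# illustrative; argument order follows the comment above: show, obstructed,
# test, save, single_thread):
#   python run_mnist_HISI.py mnist_test_4bar False True True True False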
# input = np.reshape(data[0], (28, 28)) # test: 7 train: 7
# input = np.reshape(data[1], (28, 28)) # test: 2 train: 3
# input = np.reshape(data[2], (28, 28)) # test: 1 train: 4
# input = np.reshape(data[3], (28, 28)) # test: 0 train: 6
# input = np.reshape(data[4], (28, 28)) # test: 4 -> two parts train: 1
# input = np.reshape(data[5], (28, 28)) # test: 1 train: 8
# input = np.reshape(data[6], (28, 28)) # test: 4 train: 1
# input = np.reshape(data[7], (28, 28)) # test: 9 train: 0
# input = np.reshape(data[8], (28, 28)) # test: 6 train: 9
# input = np.reshape(data[9], (28, 28)) # test: 9 -> two parts train: 8
# input = np.reshape(data[403], (28, 28)) # test: 8
# input[(input > 0) & (input < 0.5)] = 0.4
# input[input > 0] = 0.4
# input = mnist_test()
# input = mnist_test2()
# input = mnist_test3()
# input = mnist_test4()
# input = mnist_test5()
# input = mnist_test6()
# input = mnist_test7()
# input = mnist_test8()
# input = mnist_test9()
# input = mnist_test10()
num_bars = 0
if obstructed:
#train
num_bars = 3
if test:
#test
num_bars = 4
print("num bars", num_bars)
def run_mnist_hisi(start, stop):
print("From:", start, " to ", stop)
stimuli = []
stimuli2 = []
for m, mnist in enumerate(data[start:stop]):
print()
print("################################")
print("########## "+str(m + start)+" #########")
print("################################")
input = np.reshape(mnist, (28, 28))
input[(input > 0) & (input < 0.5)] = 0.4
images, boundaries = hisi(input, use_quadratic=False)
all_img = []
for i, img in enumerate(images):
if show:
plt.figure()
plt.imshow(show_matrix(img, boundaries[i]))
all_img.append(img)
# scipy.misc.toimage(img, cmin=0.0, cmax=1.0).save('img' + str(i) + '.jpg')
# np.save('img'+str(num_obj), input)
# # to save all the founded objects
# output = np.zeros((28,28))
# nb_tot_img = np.shape(all_img)[0]
# nb_img = nb_tot_img - num_bars - 2
# if nb_img > 0:
# for i in range(nb_img):
# output += all_img[2+i]
# else:
# output = all_img[1]
#
# output[output < 0] = 0
# stimuli.append(output)
# to save the input stimuli plus the first found object
# output = [all_img[0], all_img[-(num_bars + 1)]]
# output = np.reshape(output2, (np.shape(output2)[0] * np.shape(output2)[1], np.shape(output2)[2]))
# stimuli.append(output)
# to save the last object
output2 = all_img[-(num_bars + 1)]
        # scipy.misc.toimage(output, cmin=0.0, cmax=1.0).save('results/img'+str(m)+'.jpg')
output2[output2 < 0] = 0
stimuli2.append(output2)
if show:
plt.show()
#
# if m > 0 and m % 1000 == 0:
# np.save("results/mnist_fast_lami_train_" + str(m + start), stimuli)
if save:
# np.save("temp/mnist_HISI_"+str(start), stimuli)
np.save("temp/mnist_HISI_"+str(start), stimuli2)
if __name__ == '__main__':
startSimulationTime = datetime.datetime.now()
if single_thread:
# one thread
run_mnist_hisi(0, 10)
else:
# multi processor
# train: 55000 -> 11 cores / 5000 images
# test: 10000 -> 16 cores / 625 images
if test:
npt = 625 # Number of images Per Thread
num_cores = 16
else:
npt = 5000 # Number of images Per Thread
num_cores = 11
thread_list = []
run_size = npt * num_cores
for i in range(num_cores):
batch_size = i * npt
start = batch_size
stop = start + npt
p = Process(target=run_mnist_hisi, args=(start,stop))
thread_list.append(p)
for p in thread_list:
p.start()
for p in thread_list:
p.join()
endSimulationTime = datetime.datetime.now()
print("Simulation time: " + str(endSimulationTime - startSimulationTime))
|
mit
|
wangyarui/deep-learning
|
AutoEncoder/convDAE.py
|
1
|
3032
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
print("TensorFlow Version: %s" % tf.__version__)
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0, one_hot=False)
img = mnist.train.images[20]
plt.imshow(img.reshape((28, 28)))
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs_')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets_')
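# Shape walk-through (inferred from the layers below, assuming 28x28x1 inputs):
# encoder: 28x28x1 -> conv+pool -> 14x14x64 -> conv+pool -> 7x7x64 -> conv+pool -> 4x4x32
# decoder: resize to 7x7 + conv(32) -> resize to 14x14 + conv(64) -> resize to 28x28 + conv(64)
# head:    1-channel 28x28 logits, squashed with a sigmoid for the reconstruction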
conv1 = tf.layers.conv2d(inputs_, 64, (3,3), padding='same', activation=tf.nn.relu)
conv1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
conv2 = tf.layers.conv2d(conv1, 64, (3,3), padding='same', activation=tf.nn.relu)
conv2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
conv3 = tf.layers.conv2d(conv2, 32, (3,3), padding='same', activation=tf.nn.relu)
conv3 = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
conv4 = tf.image.resize_nearest_neighbor(conv3, (7,7))
conv4 = tf.layers.conv2d(conv4, 32, (3,3), padding='same', activation=tf.nn.relu)
conv5 = tf.image.resize_nearest_neighbor(conv4, (14,14))
conv5 = tf.layers.conv2d(conv5, 64, (3,3), padding='same', activation=tf.nn.relu)
conv6 = tf.image.resize_nearest_neighbor(conv5, (28,28))
conv6 = tf.layers.conv2d(conv6, 64, (3,3), padding='same', activation=tf.nn.relu)
logits_ = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
outputs_ = tf.nn.sigmoid(logits_, name='outputs_')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
sess = tf.Session()
noise_factor = 0.5
epochs = 10
batch_size = 128
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for idx in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
        # add noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
batch_cost, _ = sess.run([cost, optimizer],
feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{} ".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[10:20]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(outputs_,
feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
plt.show()
|
unlicense
|
agsax2002/DataSciencePresentations
|
fig_code/figures.py
|
34
|
8633
|
import numpy as np
import matplotlib.pyplot as plt
import warnings
def plot_venn_diagram():
fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[]))
ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5))
ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5))
ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black'))
ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center')
ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center')
ax.text(0.0, 0.6, '$I$', size=30)
ax.axis('equal')
def plot_example_decision_tree():
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
ax.set_title('Example Decision Tree: Animal Classification', size=24)
def text(ax, x, y, t, size=20, **kwargs):
ax.text(x, y, t,
ha='center', va='center', size=size,
bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
ax.axis([0, 1, 0, 1])
def visualize_tree(estimator, X, y, boundaries=True,
xlim=None, ylim=None):
estimator.fit(X, y)
if xlim is None:
xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1)
if ylim is None:
ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1)
x_min, x_max = xlim
y_min, y_max = ylim
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow')
plt.clim(y.min(), y.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')
plt.axis('off')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.clim(y.min(), y.max())
# Plot the decision boundaries
def plot_boundaries(i, xlim, ylim):
if i < 0:
return
tree = estimator.tree_
if tree.feature[i] == 0:
plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k')
plot_boundaries(tree.children_left[i],
[xlim[0], tree.threshold[i]], ylim)
plot_boundaries(tree.children_right[i],
[tree.threshold[i], xlim[1]], ylim)
elif tree.feature[i] == 1:
plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k')
plot_boundaries(tree.children_left[i], xlim,
[ylim[0], tree.threshold[i]])
plot_boundaries(tree.children_right[i], xlim,
[tree.threshold[i], ylim[1]])
if boundaries:
plot_boundaries(0, plt.xlim(), plt.ylim())
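# Hedged usage sketch (the dataset and classifier below are assumptions made
# for illustration; any 2-feature X with integer labels y works):
#   from sklearn.tree import DecisionTreeClassifier
#   from sklearn.datasets.samples_generator import make_blobs
#   X, y = make_blobs(n_samples=300, centers=4, random_state=0, cluster_std=0.6)
#   visualize_tree(DecisionTreeClassifier(max_depth=3), X, y)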
def plot_tree_interactive(X, y):
from sklearn.tree import DecisionTreeClassifier
def interactive_tree(depth=1):
clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
visualize_tree(clf, X, y)
from IPython.html.widgets import interact
return interact(interactive_tree, depth=[1, 5])
def plot_kmeans_interactive(min_clusters=1, max_clusters=6):
from IPython.html.widgets import interact
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
def _kmeans_step(frame=0, n_clusters=4):
rng = np.random.RandomState(2)
labels = np.zeros(X.shape[0])
centers = rng.randn(n_clusters, 2)
nsteps = frame // 3
for i in range(nsteps + 1):
old_centers = centers
if i < nsteps or frame % 3 > 0:
dist = euclidean_distances(X, centers)
labels = dist.argmin(1)
if i < nsteps or frame % 3 > 1:
centers = np.array([X[labels == j].mean(0)
for j in range(n_clusters)])
nans = np.isnan(centers)
centers[nans] = old_centers[nans]
# plot the data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow',
vmin=0, vmax=n_clusters - 1);
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c='black', s=50)
# plot new centers if third frame
if frame % 3 == 2:
for i in range(n_clusters):
plt.annotate('', centers[i], old_centers[i],
arrowprops=dict(arrowstyle='->', linewidth=1))
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c='black', s=50)
plt.xlim(-4, 4)
plt.ylim(-2, 10)
if frame % 3 == 1:
plt.text(3.8, 9.5, "1. Reassign points to nearest centroid",
ha='right', va='top', size=14)
elif frame % 3 == 2:
plt.text(3.8, 9.5, "2. Update centroids to cluster means",
ha='right', va='top', size=14)
return interact(_kmeans_step, frame=[0, 50],
n_clusters=[min_clusters, max_clusters])
def plot_image_components(x, coefficients=None, mean=0, components=None,
imshape=(8, 8), n_components=6, fontsize=12):
if coefficients is None:
coefficients = x
if components is None:
components = np.eye(len(coefficients), len(x))
mean = np.zeros_like(x) + mean
fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
g = plt.GridSpec(2, 5 + n_components, hspace=0.3)
def show(i, j, x, title=None):
ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
ax.imshow(x.reshape(imshape), interpolation='nearest')
if title:
ax.set_title(title, fontsize=fontsize)
show(slice(2), slice(2), x, "True")
approx = mean.copy()
show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
show(1, 2, approx, r'$1 \cdot \mu$')
for i in range(0, n_components):
approx = approx + coefficients[i] * components[i]
show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1))
show(1, i + 3, approx,
r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
transform=plt.gca().transAxes, fontsize=fontsize)
show(slice(2), slice(-2, None), approx, "Approx")
def plot_pca_interactive(data, n_components=6):
from sklearn.decomposition import PCA
from IPython.html.widgets import interact
pca = PCA(n_components=n_components)
Xproj = pca.fit_transform(data)
def show_decomp(i=0):
plot_image_components(data[i], Xproj[i],
pca.mean_, pca.components_)
interact(show_decomp, i=(0, data.shape[0] - 1));
|
mit
|
miltonsarria/dsp-python
|
arduino/ejemplo2.py
|
1
|
6249
|
#Milton Orlando Sarria
#USC - Cali
#visualize data coming from arduino graphically
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import serial
import threading
import time
#######################################################################
#######################################################################
portname = '/dev/ttyACM0'   #verify the name of your port
portrate = 9600             #baud rate
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
class comObj(threading.Thread):
def __init__(self,portName,portRate):
threading.Thread.__init__(self)
self.raw_data = '0\r\n' #initial value
self.dataCount = 0 #no data
self.read = False #flag to read from serial port
self.stop = False #flag to stop the whole process
self.portName = portName
self.portRate = portRate
self.num_data = np.array([])
self.tRead = 10/1e3
def run(self):
#create the port instance
self.serPort = serial.Serial(self.portName, self.portRate)
while not(self.stop):
while self.read:
self.raw_data =self.serPort.readline()
#if it only reads '\r\n' or less, there is no data
if len(self.raw_data)>2:
try:
self.num_data=np.append(self.num_data,float(self.raw_data[:-2]))
self.dataCount+=1
except:
print('Error: no data')
#if stop the process, close the port before leaving
self.serPort.close()
return
def kill(self):
self.read=False
self.stop=True
return
def save(self,name_file):
x_data=np.arange(self.num_data.size)
X=np.vstack((x_data,self.num_data))
X=X.transpose()
np.savetxt(name_file, X, fmt='%5.5f', delimiter='\t', newline='\n', header='', footer='', comments='# ')
return
#######################################################################
#######################################################################
#######################################################################
def update(i):
buffersize = 256
y_b = np.array([])
x_b = np.array([])
if readObj.dataCount>0:
y_data=readObj.num_data
x_data=np.linspace(0,readObj.dataCount*readObj.tRead,y_data.size)
if y_data.size<buffersize:
y_b=y_data
x_b=x_data
else:
y_b=y_data[y_data.size-buffersize:]
x_b=x_data[x_data.size-buffersize:]
ax.clear()
ax.plot(x_b,y_b)
#define the object that handles communication with and processing of the data coming from arduino
readObj = comObj(portname,portrate)
readObj.read=True #start reading
#initialize the plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_autoscaley_on(True)
ax.set_ylim(-1, 1)
readObj.start() #start the data-reading thread
ani = FuncAnimation(fig, update, interval=500) #update the plot every 500 ms
plt.show()
readObj.kill()
#######################################################################
#######################################################################
#######################################################################
#arduino code
'''
//sum three sinusoids
float t = 0; //value of the independent variable
double value = 0; //result to be transmitted to the pc
int t_sample = 10; //sampling time, in ms. Spacing between samples, i.e. how often
//a new sample is transmitted to the pc
const float pi=3.141592;
void setup() {
Serial.begin(9600);
}
void loop() {
//sum three frequency components
value=sin(2*pi*1*t)+0.7*sin(2*pi*2*t)+0.3*sin(2*pi*4*t);
Serial.println(value,4);
t=t+float(t_sample)/1000;
delay(t_sample);
}
'''
#######################################################################
'''
//generate a square wave by summing different harmonics using the Fourier series
float t = 0; //value of the independent variable
double value = 0; //result to be transmitted to the pc
int t_sample = 10; //sampling time, in ms. Spacing between samples, i.e. how often
//a new sample is transmitted to the pc
const float pi=3.141592;
const float f = 2;
void setup() {
Serial.begin(9600);
}
void loop() {
//sum the harmonic components
float w;
int MAX=4;
if(t>5) MAX=10;
if(t>10) MAX=20;
if(t>15) MAX=40;
value=0;
for(int i=1;i<MAX;i+=2)
{
w=2*pi*f*i;
value=value+2/(i*pi)*sin(w*t);
}
Serial.println(value,4);
t=t+float(t_sample)/1000;
delay(t_sample);
}
'''
#######################################################################
'''
//generate a triangular wave by summing different harmonics using the Fourier series
float t = 0; //value of the independent variable
double value = 0; //result to be transmitted to the pc
int t_sample = 10; //sampling time, in ms. Spacing between samples, i.e. how often
//a new sample is transmitted to the pc
const float pi=3.141592;
const float f = 2;
void setup() {
Serial.begin(9600);
}
void loop() {
//sum the harmonic components
float w;
int MAX=3;
if(t>5) MAX=5;
if(t>10) MAX=8;
if(t>15) MAX=10;
if(t>20) MAX=50;
value=0;
for(int i=1;i<MAX;i++)
{
w=2*pi*f*i;
value=value+2/(pow(i,2)*pow(pi,2))*(cos(i*pi)-1)*cos(w*t);
}
value=2*value;
Serial.println(value,4);
t=t+float(t_sample)/1000;
delay(t_sample);
}
'''
|
mit
|
jgliss/pyplis
|
scripts/ex03_plume_background.py
|
1
|
11609
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliß ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pyplis example script no. 3 - Plume background analysis.
This example script introduces features related to plume background modelling
and tau image calculations.
"""
from __future__ import (absolute_import, division)
from SETTINGS import check_version
import numpy as np
from os.path import join
import pyplis
from matplotlib.pyplot import show, subplots, close
# IMPORT GLOBAL SETTINGS
from SETTINGS import SAVEFIGS, SAVE_DIR, FORMAT, DPI, IMG_DIR, OPTPARSE
# Check script version
check_version()
# SCRIPT OPTIONS
# If this is True, then sky reference areas are set in auto mode (note that
# in this case, the tests at the end of the script will fail!)
USE_AUTO_SETTINGS = False
# intensity threshold to init mask for bg surface fit
POLYFIT_2D_MASK_THRESH = 2600
# Choose the background correction modes you want to use
BG_CORR_MODES = [0, # 2D poly surface fit (without sky radiance image)
1, # Scaling of sky radiance image
4, # Scaling + linear gradient correction in x & y direction
6] # Scaling + quadr. gradient correction in x & y direction
# Image file paths relevant for this script
PLUME_FILE = join(IMG_DIR, 'EC2_1106307_1R02_2015091607065477_F01_Etna.fts')
BG_FILE = join(IMG_DIR, 'EC2_1106307_1R02_2015091607022602_F01_Etna.fts')
OFFSET_FILE = join(IMG_DIR, 'EC2_1106307_1R02_2015091607064723_D0L_Etna.fts')
DARK_FILE = join(IMG_DIR, 'EC2_1106307_1R02_2015091607064865_D1L_Etna.fts')
# SCRIPT FUNCTION DEFINITIONS
def init_background_model():
"""Create background model and define relevant sky reference areas."""
# Create background modelling object
m = pyplis.plumebackground.PlumeBackgroundModel()
# Define default gas free areas in plume image
w, h = 40, 40 # width/height of rectangles
m.scale_rect = [1280, 20, 1280 + w, 20 + h]
m.xgrad_rect = [20, 20, 20 + w, 20 + h]
m.ygrad_rect = [1280, 660, 1280 + w, 660 + h]
# Define coordinates of horizontal and vertical profile lines
    # row number of profile line for horizontal corrections in the sky
# gradient...
m.xgrad_line_rownum = 40
# ... and start / stop columns for the corrections
m.xgrad_line_startcol = 20
m.xgrad_line_stopcol = 1323
    # col number of profile line for vertical corrections in the sky gradient...
m.ygrad_line_colnum = 1300
# ... and start / stop rows for the corrections
m.ygrad_line_startrow = 10
m.ygrad_line_stoprow = 700
    # Order of polynomial fit applied for the gradient correction
m.ygrad_line_polyorder = 2
return m
def load_and_prepare_images():
"""Load images defined above and prepare them for the background analysis.
Returns
-------
- Img, plume image
- Img, plume image vignetting corrected
- Img, sky radiance image
"""
# get custom load method for ECII
fun = pyplis.custom_image_import.load_ecII_fits
    # Load the image objects and perform dark correction
plume, bg = pyplis.Img(PLUME_FILE, fun), pyplis.Img(BG_FILE, fun)
dark, offset = pyplis.Img(DARK_FILE, fun), pyplis.Img(OFFSET_FILE, fun)
# Model dark image for tExp of plume image
dark_plume = pyplis.image.model_dark_image(plume.meta["texp"],
dark, offset)
# Model dark image for tExp of background image
dark_bg = pyplis.image.model_dark_image(bg.meta["texp"],
dark, offset)
plume.subtract_dark_image(dark_plume)
bg.subtract_dark_image(dark_bg)
# Blur the images (sigma = 1)
plume.add_gaussian_blurring(1)
bg.add_gaussian_blurring(1)
# Create vignetting correction mask from background image
vign = bg.img / bg.img.max() # NOTE: potentially includes y & x gradients
plume_vigncorr = pyplis.Img(plume.img / vign)
return plume, plume_vigncorr, bg
def autosettings_vs_manual_settings(bg_model):
"""Perform automatic retrieval of sky reference areas.
    If you are lazy... (i.e. you don't want to define all these reference areas)
then you could also use the auto search function, a comparison is plotted
here.
"""
auto_params = pyplis.plumebackground.find_sky_reference_areas(plume)
current_params = bg_model.settings_dict()
fig, axes = subplots(1, 2, figsize=(16, 6))
axes[0].set_title("Manually set parameters")
pyplis.plumebackground.plot_sky_reference_areas(plume, current_params,
ax=axes[0])
pyplis.plumebackground.plot_sky_reference_areas(plume, auto_params,
ax=axes[1])
axes[1].set_title("Automatically set parameters")
return auto_params, fig
def plot_pcs_profiles_4_tau_images(tau0, tau1, tau2, tau3, pcs_line):
"""Plot PCS profiles for all 4 methods."""
fig, ax = subplots(1, 1)
tau_imgs = [tau0, tau1, tau2, tau3]
for k in range(4):
img = tau_imgs[k]
profile = pcs_line.get_line_profile(img)
ax.plot(profile, "-", label=r"Mode %d: $\phi=%.3f$"
% (BG_CORR_MODES[k], np.mean(profile)))
ax.grid()
ax.set_ylabel(r"$\tau_{on}$", fontsize=20)
ax.set_xlim([0, pcs_line.length()])
ax.set_xticklabels([])
ax.set_xlabel("PCS", fontsize=16)
ax.legend(loc="best", fancybox=True, framealpha=0.5, fontsize=12)
return fig
# SCRIPT MAIN FUNCTION
if __name__ == "__main__":
close("all")
# Create a background model with relevant sky reference areas
bg_model = init_background_model()
# Define exemplary plume cross section line
pcs_line = pyplis.LineOnImage(x0=530,
y0=730,
x1=890,
y1=300,
line_id="example PCS",
color="lime")
plume, plume_vigncorr, bg = load_and_prepare_images()
auto_params, fig0 = autosettings_vs_manual_settings(bg_model)
# Script option
if USE_AUTO_SETTINGS:
bg_model.update(**auto_params)
# Model 4 exemplary tau images
# list to store figures of tau plotted tau images
_tau_figs = []
# mask for corr mode 0 (i.e. 2D polyfit)
mask = np.ones(plume_vigncorr.img.shape, dtype=np.float32)
mask[plume_vigncorr.img < POLYFIT_2D_MASK_THRESH] = 0
# First method: retrieve tau image using poly surface fit
tau0 = bg_model.get_tau_image(plume_vigncorr,
mode=BG_CORR_MODES[0],
surface_fit_mask=mask,
surface_fit_polyorder=1)
# Plot the result and append the figure to _tau_figs
_tau_figs.append(bg_model.plot_tau_result(tau0, PCS=pcs_line))
# Second method: scale background image to plume image in "scale" rect
tau1 = bg_model.get_tau_image(plume, bg, mode=BG_CORR_MODES[1])
_tau_figs.append(bg_model.plot_tau_result(tau1, PCS=pcs_line))
# Third method: Linear correction for radiance differences based on two
# rectangles (scale, ygrad)
tau2 = bg_model.get_tau_image(plume, bg, mode=BG_CORR_MODES[2])
_tau_figs.append(bg_model.plot_tau_result(tau2, PCS=pcs_line))
# 4th method: 2nd order polynomial fit along vertical profile line
# For this method, determine tau on tau off and AA image
tau3 = bg_model.get_tau_image(plume, bg, mode=BG_CORR_MODES[3])
_tau_figs.append(bg_model.plot_tau_result(tau3, PCS=pcs_line))
fig6 = plot_pcs_profiles_4_tau_images(tau0, tau1, tau2, tau3, pcs_line)
if SAVEFIGS:
fig0.savefig(join(SAVE_DIR, "ex03_out_1.%s" % FORMAT), format=FORMAT,
dpi=DPI, transparent=True)
for k in range(len(_tau_figs)):
# _tau_figs[k].suptitle("")
_tau_figs[k].savefig(join(SAVE_DIR, "ex03_out_%d.%s"
% ((k + 2), FORMAT)),
format=FORMAT, dpi=DPI)
fig6.savefig(join(SAVE_DIR, "ex03_out_6.%s" % FORMAT), format=FORMAT,
dpi=DPI)
# IMPORTANT STUFF FINISHED (Below follow tests and display options)
# Import script options
(options, args) = OPTPARSE.parse_args()
# If applicable, do some tests. This is done only if TESTMODE is active:
# testmode can be activated globally (see SETTINGS.py) or can also be
# activated from the command line when executing the script using the
# option --test 1
if int(options.test):
import numpy.testing as npt
from os.path import basename
m = bg_model
# test settings for clear sky reference areas
npt.assert_array_equal([2680, 3960, 160, 6, 1300, 10, 700, 40, 20,
1323, 567584],
[sum(m.scale_rect),
sum(m.ygrad_rect),
sum(m.xgrad_rect),
m.mode,
m.ygrad_line_colnum,
m.ygrad_line_startrow,
m.ygrad_line_stoprow,
m.xgrad_line_rownum,
m.xgrad_line_startcol,
m.xgrad_line_stopcol,
int(m.surface_fit_mask.sum())])
m.update(**auto_params)
# test settings for clear sky reference areas
npt.assert_array_equal([2682, 4142, 1380, 6, 1337, 1, 790, 6, 672,
1343, 567584],
[sum(m.scale_rect),
sum(m.ygrad_rect),
sum(m.xgrad_rect),
m.mode,
m.ygrad_line_colnum,
m.ygrad_line_startrow,
m.ygrad_line_stoprow,
m.xgrad_line_rownum,
m.xgrad_line_startcol,
m.xgrad_line_stopcol,
int(m.surface_fit_mask.sum())])
# test all tau-modelling results
actual = [tau0.mean(), tau1.mean(), tau2.mean(), tau3.mean()]
npt.assert_allclose(actual=actual,
desired=[0.11395558008662043,
0.25279653,
0.13842879832119934,
0.13943940574220634],
rtol=1e-7)
print("All tests passed in script: %s" % basename(__file__))
try:
if int(options.show) == 1:
show()
except BaseException:
print("Use option --show 1 if you want the plots to be displayed")
|
gpl-3.0
|
jlec/numdifftools
|
numdifftools/run_benchmark.py
|
1
|
6544
|
import numpy as np
import timeit
import numdifftools as nd
import numdifftools.nd_algopy as nda
from algopy import dot
# from numpy import dot
from collections import OrderedDict
from numdifftools.core import MinStepGenerator, MaxStepGenerator
import matplotlib.pyplot as pyplot
class BenchmarkFunction(object):
def __init__(self, N):
A = np.arange(N * N, dtype=float).reshape((N, N))
self.A = np.dot(A.T, A)
def __call__(self, xi):
x = np.array(xi)
tmp = dot(self.A, x)
return 0.5 * dot(x * x, tmp)
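# Hedged sketch of what the benchmark objective computes (N=2 is an
# illustrative assumption): with A = A0.T @ A0 built from A0 = arange(4).reshape(2, 2),
# the call evaluates f(x) = 0.5 * (x*x) . (A x), a smooth scalar used purely to
# exercise the gradient/Hessian estimators defined below.
#   f = BenchmarkFunction(2)
#   f([1.0, 1.0])   # returns a single float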
def plot_errors(error_objects, problem_sizes, symbols):
ploterror = pyplot.semilogy
for title, funcs, results in error_objects:
pyplot.figure()
pyplot.title(title)
# ref_sol = results[0]
for i, method in enumerate(funcs):
ploterror(problem_sizes, results[i], symbols[i],
markerfacecolor='None', label=method)
pyplot.ylabel(r'Absolute error $\|g_{ref} - g\|$')
pyplot.xlabel('problem size $N$')
pyplot.ylim(loglimits(results))
pyplot.grid()
leg = pyplot.legend(loc=7)
frame = leg.get_frame()
frame.set_alpha(0.4)
pyplot.savefig(title.lower().replace(' ', '_') + '.png', format='png')
def plot_runtimes(run_time_objects, problem_sizes, symbols):
plottime = pyplot.loglog
for title, funcs, results in run_time_objects:
pyplot.figure()
pyplot.title(title)
for i, method in enumerate(funcs):
plottime(problem_sizes, results[i], symbols[i],
markerfacecolor='None', label=method)
pyplot.ylabel('time $t$')
pyplot.xlabel('problem size $N$')
pyplot.xlim(loglimits(problem_sizes))
pyplot.ylim(loglimits(results))
pyplot.grid()
leg = pyplot.legend(loc=2)
frame = leg.get_frame()
frame.set_alpha(0.4)
pyplot.savefig(title.lower().replace(' ', '_') + '.png', format='png')
def loglimits(data, border=0.05):
low, high = np.min(data), np.max(data)
scale = (high/low)**border
return low/scale, high*scale
fixed_step = MinStepGenerator(num_steps=1, use_exact_steps=True, offset=0)
epsilon = MaxStepGenerator(num_steps=14, use_exact_steps=True,
step_ratio=1.6, offset=0)
adaptiv_txt = '_adaptive_%d_%s_%d' % (epsilon.num_steps,
str(epsilon.step_ratio), epsilon.offset)
gradient_funs = OrderedDict()
nda_method = 'forward'
nda_txt = 'algopy_' + nda_method
gradient_funs[nda_txt] = nda.Jacobian(1, method=nda_method)
# gradient_funs['numdifftools'] = nd.Jacobian(1, **options)
for method in ['forward', 'central', 'complex']:
method2 = method + adaptiv_txt
gradient_funs[method] = nd.Jacobian(1, method=method, step=fixed_step)
gradient_funs[method2] = nd.Jacobian(1, method=method, step=epsilon)
HessianFun = 'Hessdiag'
ndcHessian = getattr(nd, HessianFun) # ndc.Hessian #
hessian_funs = OrderedDict()
hessian_funs[nda_txt] = getattr(nda, HessianFun)(1, method=nda_method)
for method in ['forward', 'central', 'complex']:
method2 = method + adaptiv_txt
hessian_funs[method] = ndcHessian(1, method=method, step=fixed_step)
hessian_funs[method2] = ndcHessian(1, method=method, step=epsilon)
def compute_gradients(gradient_funs, problem_sizes):
results_gradient_list = []
for N in problem_sizes:
        print('N=', N)
num_methods = len(gradient_funs)
results_gradient = np.zeros((num_methods, 3))
ref_g = None
f = BenchmarkFunction(N)
for i, (_key, gradient_f) in enumerate(gradient_funs.items()):
t = timeit.default_timer()
gradient_f.f = f
preproc_time = timeit.default_timer() - t
t = timeit.default_timer()
x = 3 * np.ones(N)
val = gradient_f(x)
run_time = timeit.default_timer() - t
if ref_g is None:
ref_g = val
err = 0
norm_ref_g = np.linalg.norm(ref_g)
else:
err = np.linalg.norm(val - ref_g) / norm_ref_g
results_gradient[i] = run_time, err, preproc_time
results_gradient_list.append(results_gradient)
results_gradients = np.array(results_gradient_list) + 1e-16
    print('results_gradients=\n', results_gradients)
return results_gradients
def compute_hessians(hessian_funs, problem_sizes):
    print('starting hessian computation')
results_hessian_list = []
hessian_N_list = problem_sizes
for N in hessian_N_list:
        print('N=', N)
num_methods = len(hessian_funs)
results_hessian = np.zeros((num_methods, 3))
ref_h = None
f = BenchmarkFunction(N)
for i, (_key, hessian_f) in enumerate(hessian_funs.items()):
t = timeit.default_timer()
hessian_f.f = f
preproc_time = timeit.default_timer() - t
t = timeit.default_timer()
x = 3 * np.ones(N)
val = hessian_f(x)
run_time = timeit.default_timer() - t
if ref_h is None:
ref_h = val
err = 0.0
norm_ref_h = np.linalg.norm(ref_h.ravel())
else:
err = np.linalg.norm((val - ref_h).ravel()) / norm_ref_h
results_hessian[i] = run_time, err, preproc_time
results_hessian_list.append(results_hessian)
results_hessians = np.array(results_hessian_list) + 1e-16
    print(hessian_N_list)
    print('results_hessians=\n', results_hessians)
return results_hessians
if __name__ == '__main__':
problem_sizes = (4, 8, 16, 32, 64, 96)
symbols = ('-kx', ':k>', ':k<', '--k^', '--kv', '-kp', '-ks',
'b', '--b', '-k+')
results_gradients = compute_gradients(gradient_funs, problem_sizes)
results_hessians = compute_hessians(hessian_funs, problem_sizes)
print(results_gradients.shape)
run_time_objects = [('Jacobian run times',
gradient_funs, results_gradients[..., 0].T),
('Hessian run times',
hessian_funs, results_hessians[..., 0].T)]
error_objects = [('Jacobian errors',
gradient_funs, results_gradients[..., 1].T),
('Hessian errors',
hessian_funs, results_hessians[..., 1].T)]
plot_runtimes(run_time_objects, problem_sizes, symbols)
plot_errors(error_objects, problem_sizes, symbols)
|
bsd-3-clause
|
RuthAngus/kalesalad
|
code/rotation.py
|
1
|
7463
|
"""
A system for measuring rotation periods.
This contains functions for measuring rotation.
"""
import os
import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import filtering as flt
import kplr
client = kplr.API()
import kepler_data as kd
from astropy.stats import LombScargle
class prot(object):
"""
Given a star object with a kepid or x, y and yerr values, measure the
rotation period.
__init__ downloads the light curve if it doesn't already exist.
pgram_ps measures a periodogram rotation period.
"""
def __init__(self, kepid=None, x=None, y=None, yerr=None,
LC_DIR="/Users/ruthangus/.kplr/data/lightcurves"):
"""
params:
------
kepid: (int)
The KIC id.
x: (array)
The time array.
y: (array)
The flux array.
yerr: (array)
The flux uncertainty array.
"""
self.kepid = kepid
# If x, y and yerr are not provided, load them.
if not np.array([x, y, yerr]).any():
lc_path = os.path.join(LC_DIR, str(kepid).zfill(9))
# If you don't have the light curves, download them.
if not os.path.exists(lc_path):
print("Downloading light curve...")
star = client.star(kepid)
star.get_light_curves(fetch=True, short_cadence=False)
print("Loading light curve...")
self.x, self.y, self.yerr = kd.load_kepler_data(lc_path)
else:
self.x, self.y, self.yerr = x, y, yerr
def pgram_ps(self, filter_period=35., cutoff=100, plot=False,
clobber=False):
"""
Measure a periodogram rotation period
parameters:
----------
filter_period: (float or None)
if None, the lc is not filtered.
if float, the lc is high-pass filtered with a cutoff of
filter_period.
cutoff: (int)
The maximum period in the grid. Recommend 30 days for K2 data.
plot: (bool)
If true the periodogram is plotted and saved.
clobber: (bool)
If true any existing periodogram file is overwritten.
returns:
-------
pgram_period: (float)
The rotation period
pgram_period_err: (float)
The formal uncertainty on the period.
        Adds self.pgram_period, self.pgram_period_err, self.pgram and self.ps.
"""
pgram_fname = "pgrams/{}_pgram".format(self.kepid)
if not os.path.exists("pgrams"):
os.mkdir("pgrams")
# Fit and remove a straight line
sx = self.x - self.x[0]
AT = np.vstack((sx, np.ones_like(sx)))
ATA = np.dot(AT, AT.T)
m, c = np.linalg.solve(ATA, np.dot(AT, self.y))
self.y -= m*sx + c
# Renormalise
med = np.median(self.y)
self.y = self.y/med - 1
self.yerr = self.yerr/med
if clobber:
freq = np.linspace(1./cutoff, 1./.1, 100000)
ps = 1./freq
# Filter data
if filter_period:
filter_period = filter_period # days
fs = 1./(self.x[1] - self.x[0])
lowcut = 1./filter_period
yfilt = flt.butter_bandpass_filter(self.y, lowcut, fs, order=3,
plot=False)
else:
yfilt = self.y*1
print("Calculating periodogram")
pgram = LombScargle(self.x, yfilt, self.yerr).power(freq)
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] <
pgram[i] and pgram[i+1] < pgram[i]])
presults = pd.DataFrame({"periods": ps, "power": pgram})
presults.to_csv("{}.csv".format(pgram_fname))
print("saving pgram to {}.csv".format(pgram_fname))
else: # If not clobber, look for old result.
print("looking for {}.csv".format(pgram_fname))
# If pgram already exists
if os.path.exists("{}.csv".format(pgram_fname)) and not clobber:
            print("{}.csv found, loading pre-existing periodogram"
.format(pgram_fname))
pr = pd.read_csv("{}.csv".format(pgram_fname))
ps, pgram = pr.periods.values, pr.power.values
else: # If pgram does not exist.
freq = np.linspace(1./100, 1./.1, 100000)
ps = 1./freq
filter_period = 35. # days
fs = 1./(self.x[1] - self.x[0])
lowcut = 1./filter_period
yfilt = flt.butter_bandpass_filter(self.y, lowcut, fs, order=3,
plot=False)
print("Calculating periodogram")
pgram = LombScargle(self.x, yfilt, self.yerr).power(freq)
presults = pd.DataFrame({"periods": ps, "power": pgram})
presults.to_csv("{}.csv".format(pgram_fname))
print("saving pgram to {}.csv".format(pgram_fname))
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] <
pgram[i] and pgram[i+1] < pgram[i]])
pgram_period = ps[pgram == max(pgram[peaks])][0]
if plot:
pl.clf()
pl.subplot(2, 1, 1)
pl.plot(self.x-self.x[0], self.y, "k.", ms=3)
# pl.plot(self.x - self.x[0], m*(self.x - self.x[0]) + c)
# pl.plot(self.x - self.x[0], self.y - m*(self.x - self.x[0]) + c)
pl.xlim(0, 50)
pl.title("Period = {0:.2f} days".format(pgram_period))
pl.subplot(2, 1, 2)
pl.plot(ps, pgram)
pl.axvline(pgram_period, color="orange", ls="--")
pl.xlabel("Period (days)")
pl.savefig(pgram_fname)
print("saving plot as {}.png".format(pgram_fname))
# Calculate the uncertainty.
_freq = 1./pgram_period
pgram_freq_err = self.calc_pgram_uncertainty(_freq)
frac_err = pgram_freq_err/_freq
pgram_period_err = pgram_period * frac_err
self.pgram_period = pgram_period
self.pgram_period_err = pgram_period_err
self.pgram = pgram
self.ps = ps
return pgram_period, pgram_period_err
def calc_pgram_uncertainty(self, freq):
"""
Calculate the formal uncertainty on the periodogram period (1/freq).
"""
phase, A = self.calc_phase_and_amp(freq)
y_noise = self.y - A**2*np.sin(2*np.pi*freq*self.x + phase)
sigma_n = np.var(y_noise)
N, T = len(self.x), self.x[-1] - self.x[0]
return 3 * np.pi * sigma_n / (2 * N**.5 * T * A)
def calc_phase_and_amp(self, f):
"""
Phase and amplitude calculation for the calc_pgram_uncertainty
function.
"""
AT = np.vstack((self.x, np.ones((3, len(self.y)))))
ATA = np.dot(AT, AT.T)
arg = 2*np.pi*f*self.x
AT[-2, :] = np.sin(arg)
AT[-1, :] = np.cos(arg)
v = np.dot(AT[:-2, :], AT[-2:, :].T)
ATA[:-2, -2:] = v
ATA[-2:, :-2] = v.T
ATA[-2:, -2:] = np.dot(AT[-2:, :], AT[-2:, :].T)
w = np.linalg.solve(ATA, np.dot(AT, self.y))
A, B = w[-1], w[-2]
phase = np.arctan(A/B)
Amp = (np.abs(A) + np.abs(B))**.5
return phase, Amp
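# Hedged usage sketch (the synthetic light curve below is an assumption; with a
# real KIC id and no x/y/yerr the class would download the light curve instead):
#   t = np.linspace(0, 80, 4000)
#   flux = 1 + 0.01*np.sin(2*np.pi*t/10.) + np.random.normal(0, 1e-3, t.size)
#   star = prot(kepid=0, x=t, y=flux, yerr=np.full(t.size, 1e-3))
#   period, period_err = star.pgram_ps(clobber=True, plot=False)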
|
mit
|
giorgiop/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
176
|
12155
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
zooniverse/aggregation
|
experimental/penguins/clusterAnalysis/blankPhotos.py
|
2
|
2205
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import urllib
import matplotlib.cbook as cbook
client = pymongo.MongoClient()
db = client['penguin_2014-09-24']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
errorCount = 0
initial_consecutive_blanks = 5
blank_classifications = {}
urls = {}
i = 0
for r in collection.find():#{"classification_count": {"$gt": 0}}):
assert r["annotations"][0]["key"] == "animalsPresent"
if not(r["annotations"][0]["value"] in ["yes","no","cant_tell"]):
#print r["annotations"]
errorCount += 1
continue
i += 1
if (i%25000) == 0:
print i
zooniverse_id = r["subjects"][0]["zooniverse_id"]
blank_image = (r["annotations"][0]["value"] != "yes")
if not(zooniverse_id in blank_classifications):
blank_classifications[zooniverse_id] = []
r2 = collection2.find_one({"zooniverse_id":zooniverse_id})
urls[zooniverse_id] = r2["metadata"]["path"]#r["subjects"][0]["location"]["standard"]
if blank_image:
blank_classifications[zooniverse_id].append(0)
else:
blank_classifications[zooniverse_id].append(1)
#print errorCount
false_blank_counter = 0
true_blank_counter = 0
total_counter = 0
nowRetire = 0
actuallyBlank = 0
notBlank = 0
for zooniverse_id in blank_classifications:
#were the first initial X classifications all blank?
#based on the threshold variable initial_consecutive_blanks
#read this in as the gold standard
b = blank_classifications[zooniverse_id]
if len(b) < 10:
continue
total_counter += 1
#could we do better with a different threshold?
if (len(b) >= initial_consecutive_blanks) and (not(1 in b[:initial_consecutive_blanks])):
#we now think this is a blank image
if not(1 in b[:10]):
#and we would still think this image is blank at the end of 10 classifications
actuallyBlank += 1
else:
notBlank += 1
print urls[zooniverse_id]
#print actuallyBlank
#print notBlank
|
apache-2.0
|
hetaodie/hetaodie.github.io
|
assets/media/uda-ml/fjd/ica/创建客户细分/visuals.py
|
3
|
6061
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
def pca_results(good_data, pca):
'''
Create a DataFrame of the PCA results
Includes dimension feature weights and explained variance
Visualizes the PCA results
'''
# Dimension indexing
dimensions = dimensions = ['Dimension {}'.format(i) for i in range(1,len(pca.components_)+1)]
# PCA components
components = pd.DataFrame(np.round(pca.components_, 4), columns = list(good_data.keys()))
components.index = dimensions
# PCA explained variance
ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
variance_ratios.index = dimensions
# Create a bar plot visualization
fig, ax = plt.subplots(figsize = (14,8))
# Plot the feature weights as a function of the components
components.plot(ax = ax, kind = 'bar');
ax.set_ylabel("Feature Weights")
ax.set_xticklabels(dimensions, rotation=0)
# Display the explained variance ratios
for i, ev in enumerate(pca.explained_variance_ratio_):
ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
# Return a concatenated DataFrame
return pd.concat([variance_ratios, components], axis = 1)
def cluster_results(reduced_data, preds, centers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions
Adds cues for cluster centers and student-selected sample data
'''
predictions = pd.DataFrame(preds, columns = ['Cluster'])
plot_data = pd.concat([predictions, reduced_data], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned cluster
for i, cluster in plot_data.groupby('Cluster'):
cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30);
# Plot centers with indicators
for i, c in enumerate(centers):
ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \
alpha = 1, linewidth = 2, marker = 'o', s=200);
ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100);
# Plot transformed sample points
ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
s = 150, linewidth = 4, color = 'black', marker = 'x');
# Set plot title
ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def biplot(good_data, reduced_data, pca):
'''
Produce a biplot that shows a scatterplot of the reduced
data and the projections of the original features.
good_data: original data, before transformation.
Needs to be a pandas dataframe with valid column names
reduced_data: the reduced data (the first two dimensions are plotted)
pca: pca object that contains the components_ attribute
return: a matplotlib AxesSubplot object (for any additional customization)
This procedure is inspired by the script:
https://github.com/teddyroland/python-biplot
'''
fig, ax = plt.subplots(figsize = (14,8))
# scatterplot of the reduced data
ax.scatter(x=reduced_data.loc[:, 'Dimension 1'], y=reduced_data.loc[:, 'Dimension 2'],
facecolors='b', edgecolors='b', s=70, alpha=0.5)
feature_vectors = pca.components_.T
# we use scaling factors to make the arrows easier to see
arrow_size, text_pos = 7.0, 8.0,
# projections of the original features
for i, v in enumerate(feature_vectors):
ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1],
head_width=0.2, head_length=0.2, linewidth=2, color='red')
ax.text(v[0]*text_pos, v[1]*text_pos, good_data.columns[i], color='black',
ha='center', va='center', fontsize=18)
ax.set_xlabel("Dimension 1", fontsize=14)
ax.set_ylabel("Dimension 2", fontsize=14)
ax.set_title("PC plane with original feature projections.", fontsize=16);
return ax
def channel_results(reduced_data, outliers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
Data is labeled by "Channel" and cues added for student-selected sample data
'''
# Check that the dataset is loadable
try:
full_data = pd.read_csv("customers.csv")
except:
print("Dataset could not be loaded. Is the file missing?")
return False
# Create the Channel DataFrame
channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data, channel], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned Channel
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
grouped = labeled.groupby('Channel')
for i, channel in grouped:
channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);
# Plot transformed sample points
for i, sample in enumerate(pca_samples):
ax.scatter(x = sample[0], y = sample[1], \
s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);
# Set plot title
ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled");
|
mit
|
rrohan/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
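# --- Illustrative usage sketch (added by the editor, not part of scikit-learn) ---
# A minimal example of the estimators exported above: fit an empirical and a Ledoit-Wolf
# shrinkage estimate of the covariance on random data. Data and shapes are arbitrary.
def _example_covariance_estimates():
    import numpy as np
    X = np.random.RandomState(0).randn(50, 5)   # 50 samples, 5 features
    emp = EmpiricalCovariance().fit(X)          # plain maximum-likelihood estimate
    lw = LedoitWolf().fit(X)                    # shrunk estimate, usually better conditioned
    # both expose covariance_ and precision_ (inverse covariance) attributes
    return emp.covariance_, lw.covariance_, lw.shrinkage_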
|
bsd-3-clause
|
alexholcombe/spatiotopic-motion
|
analyzeDataTestingVersion.py
|
1
|
6964
|
from psychopy.tools import filetools
import inspect
import numpy as np
import psychopy_ext.stats
import psychopy_ext.plot
import pandas
from calcUnderOvercorrect import calcOverCorrected
from plotHelpers import agrestiCoull95CI
#grab some data outputted from my program, so I can test some analysis code
##The psydat file format is literally just a pickled copy of the TrialHandler object that saved it. You can open it with:
##dat = tools.filetools.fromFile(dataFileName)
#dataFileName = "data/Hubert_spatiotopicMotion_03Dec2014_15-49.psydat"
#dataFileName="data/Hubert_spatiotopicMotion_11Dec2014_13-00_DataFrame.pickle"
#dataFileName ="data/Hubert_spatiotopicMotion_15Dec2014_15-18_PSYCHOPY.txt"
dataFileName="data/Alex_spatiotopicMotion_15Dec2014_16-25_DataFrame.pickle"
if dataFileName.endswith('.pickle'):
df = filetools.fromFile(dataFileName)
elif dataFileName.endswith('.txt'):
df = pandas.read_csv(dataFileName, delimiter='\t')
print "type(df)=", type(df) # <class 'pandas.core.frame.DataFrame'>
print "df.dtypes=",df.dtypes #all "objects" for some reason
#strings in pandas are pretty much objects. Don't know why it can't be forced to a string dtype; this is supposed to work: http://stackoverflow.com/questions/22005911/convert-columns-to-string-in-pandas
#Now I can test aggregate
#dat.data seems to contain the columns I added
print "df.head=\n", df.head()
#add overcorrect to cases where tilt==0
tilt = df.loc[:,'tilt']
neutralStimIdxs = (tilt==0)
#print('neutralStimIdxs=\n',neutralStimIdxs)
if len(neutralStimIdxs)>1:
if neutralStimIdxs.any(): #Calculate over/under-correction, which is only interpretable when tilt=0
forCalculatn = df.loc[neutralStimIdxs, ['tilt','startLeft','upDown','respLeftRight']]
overCorrected = calcOverCorrected( forCalculatn )
print 'overCorrected=\n', overCorrected
df['overCorrected']= np.nan
df.loc[neutralStimIdxs, 'overCorrected'] = overCorrected
#test plotting of data
#dataframe aggregate
grouped = df.groupby('tilt')
tiltMeans = grouped.mean()
print "mean at each tilt =\n", tiltMeans
print "tiltMeans.index = ", tiltMeans.index #there is no column called 'tilt', instead it's the actual index, kinda like row names
grouped = df.groupby(['startLeft','tilt'])
for name in grouped: #this works
print name
grouped.get_group((True, 0.4)) #combo of startLeft and tilt
print 'groups=', grouped.groups #works
dirTilt = grouped.mean() #this is a dataframe, not a DataFrameGroupBy
print "mean at each dir, tilt =\n", dirTilt
print "dirTilt.index = ", dirTilt.index #there is no column called 'tilt', instead it's the actual index, kinda like row names
# MultiIndex [(False, -0.4), (False, 0.0), (False, 0.4), (True, -0.4), (True, 0.0), (True, 0.4)]
#dirTilt.groups no groups, maybe because dataframe?
#dirTilt = dirTilt.reset_index() #thanks Chris Said, except it *reduces* the number of cases that work below by one
try:
print "dirTilt.loc[True]=\n", dirTilt.loc[True] #works!!!!
except: pass
try:
print "dirTilt.loc[0.4]=\n", dirTilt.loc[0.4] #doesnt work I presume because second dimension
except: pass
try:
print "dirTilt.loc[True, 0.4]=\n", dirTilt.loc[True, 0.4] #works!!!
except: pass
try:
print "dirTilt.loc['True']=\n", dirTilt.loc['True'] #doesnt work
except: pass
try:
print "dirTilt.loc['True','0.4']=\n", dirTilt.loc['True','0.4'] #doesnt work
except: pass
#dirTilt.select()
usePsychopy_ext = False
if usePsychopy_ext:
#have to use psychopy_ext to aggregate
ag = psychopy_ext.stats.aggregate(df, values="respLeftRight", cols="tilt") #, values=None, subplots=None, yerr=None, aggfunc='mean', order='natural')
print "ag = \n", ag
plt = psychopy_ext.plot.Plot()
plt.plot(ag, kind='line')
print "Showing plot with psychopy_ext.stats.aggregate"
plt.show()
dirTilt = dirTilt.reset_index() #back into columns
leftwardM = dirTilt[ dirTilt['startLeft']==False ]
rightwardM = dirTilt[ dirTilt['startLeft']==True ]
print 'dirTilt=\n', dirTilt
import pylab
ax1 = pylab.subplot(121)
pylab.scatter(leftwardM['tilt'], leftwardM['respLeftRight'],
edgecolors=(1,0,0), facecolor=(1,0,0), label='leftward saccade')
pylab.scatter(rightwardM['tilt'], rightwardM['respLeftRight'],
edgecolors=(0,1,0), facecolor=(0,1,0), label='rightward saccade')
pylab.legend()
print str( round( 100*df['overCorrected'].mean(), 2) )
msg = 'proportion overCorrected at 0 tilt = ' + str( round( 100*df['overCorrected'].mean(), 2) ) + \
'% of ' + str( df['overCorrected'].count() ) + ' trials'
msg2= ' 95% Agresti-Coull CI = ' + \
str( np.round( agrestiCoull95CI(df['overCorrected'].sum(), df['overCorrected'].count()), 2) )
pylab.text(0.5, 0.55, msg, horizontalalignment='left', fontsize=12)
pylab.text(0.5,0.45, msg2, horizontalalignment='left', fontsize=12)
#pylab.ylim([-0.01,1.01])
#pylab.xlim([-2,102])
pylab.xlabel("tilt")
pylab.ylabel("proportion respond 'right'")
#psychometric curve basics
tiltMin = min( df['tilt'] )
tiltMax = max( df['tilt'] )
x = np.linspace(tiltMin, tiltMax, 50)
#test function fitting
#fit curve
import scipy, sys
def logistic(x, x0, k):
y = 1 / (1 + np.exp(-k*(x-x0)))
return y
def inverseLogistic(y, x0, k):
    linear = np.log ( y / (1-y) )
    # from y = 1/(1+exp(-k*(x-x0))) it follows that log(y/(1-y)) = k*(x-x0)
    # so x - x0 = linear/k
    # and x = linear/k + x0
    x = linear/k + x0
return x
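# --- added illustrative check (not in the original script); x0=0.1, k=6.0 are arbitrary example values ---
# At the 50% threshold the logistic passes through its midpoint x0, and the inverse maps 0.5 back to x0.
# This is the point of subjective equality (PSE) that is read off the fitted curves further below.
assert abs(logistic(0.1, 0.1, 6.0) - 0.5) < 1e-9
assert abs(inverseLogistic(0.5, 0.1, 6.0) - 0.1) < 1e-9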
#scipy.stats.logistic.fit
paramsLeft = None; paramsRight = None
try:
paramsLeft, pcov = scipy.optimize.curve_fit(logistic, leftwardM['tilt'], leftwardM['respLeftRight'], p0 = [0, 6])
except Exception as e:
print 'leftward fit failed ', e #sys.exc_info()[0]
try:
paramsRight, pcov = scipy.optimize.curve_fit(logistic, rightwardM['tilt'], rightwardM['respLeftRight'], p0 = [0, 6])
except Exception as e:
print 'rightward fit failed ', e #sys.exc_info()[0]
threshVal = 0.5
pylab.plot([tiltMin, tiltMax],[threshVal,threshVal],'k--') #horizontal dashed line
overCorrectAmts = list()
if paramsLeft is not None:
pylab.plot(x, logistic(x, *paramsLeft) , 'r-')
print paramsLeft
threshL = inverseLogistic(threshVal, paramsLeft[0], paramsLeft[1])
print 'threshL = ', threshL
overCorrectAmts.append(threshL)
pylab.plot([threshL, threshL],[0,threshVal],'g--') #vertical dashed line
    pylab.title('PSE (%.2f) = %0.3f' %(threshVal, threshL)) # threshR is not defined yet at this point
if paramsRight is not None:
pylab.plot(x, logistic(x, *paramsRight) , 'g-')
threshR = inverseLogistic(threshVal, paramsRight[0], paramsRight[1])
print 'threshR = ', threshR
overCorrectAmts.append(-1*threshR)
pylab.plot([threshR, threshR],[0,threshVal],'g--') #vertical dashed line
pylab.title('threshold (%.2f) = %0.3f' %(threshVal, threshR))
pylab.show()
if len(overCorrectAmts)==0:
print 'Failed both fits so cant tell you average over/under correction amount'
else:
print 'Average overcorrection (negative means undercorrection) = ', np.mean(overCorrectAmts)
|
mit
|
mlyundin/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
terrytangyuan/metric-learn
|
metric_learn/itml.py
|
1
|
7057
|
"""
Information Theoretic Metric Learning, Kulis et al., ICML 2007
ITML minimizes the differential relative entropy between two multivariate
Gaussians under constraints on the distance function,
which can be formulated into a Bregman optimization problem by minimizing the
LogDet divergence subject to linear constraints.
This algorithm can handle a wide variety of constraints and can optionally
incorporate a prior on the distance function.
Unlike some other methods, ITML does not rely on an eigenvalue computation
or semi-definite programming.
Adapted from Matlab code at http://www.cs.utexas.edu/users/pjain/itml/
"""
from __future__ import print_function, absolute_import
import numpy as np
from six.moves import xrange
from sklearn.metrics import pairwise_distances
from sklearn.utils.validation import check_array, check_X_y
from .base_metric import BaseMetricLearner
from .constraints import Constraints
from ._util import vector_norm
class ITML(BaseMetricLearner):
"""Information Theoretic Metric Learning (ITML)"""
def __init__(self, gamma=1., max_iter=1000, convergence_threshold=1e-3,
A0=None, verbose=False):
"""Initialize ITML.
Parameters
----------
gamma : float, optional
value for slack variables
max_iter : int, optional
convergence_threshold : float, optional
A0 : (d x d) matrix, optional
initial regularization matrix, defaults to identity
verbose : bool, optional
if True, prints information while learning
"""
self.gamma = gamma
self.max_iter = max_iter
self.convergence_threshold = convergence_threshold
self.A0 = A0
self.verbose = verbose
def _process_inputs(self, X, constraints, bounds):
self.X_ = X = check_array(X)
# check to make sure that no two constrained vectors are identical
a,b,c,d = constraints
no_ident = vector_norm(X[a] - X[b]) > 1e-9
a, b = a[no_ident], b[no_ident]
no_ident = vector_norm(X[c] - X[d]) > 1e-9
c, d = c[no_ident], d[no_ident]
# init bounds
if bounds is None:
self.bounds_ = np.percentile(pairwise_distances(X), (5, 95))
else:
assert len(bounds) == 2
self.bounds_ = bounds
self.bounds_[self.bounds_==0] = 1e-9
# init metric
if self.A0 is None:
self.A_ = np.identity(X.shape[1])
else:
self.A_ = check_array(self.A0)
return a,b,c,d
def fit(self, X, constraints, bounds=None):
"""Learn the ITML model.
Parameters
----------
X : (n x d) data matrix
each row corresponds to a single instance
constraints : 4-tuple of arrays
(a,b,c,d) indices into X, with (a,b) specifying positive and (c,d)
negative pairs
bounds : list (pos,neg) pairs, optional
bounds on similarity, s.t. d(X[a],X[b]) < pos and d(X[c],X[d]) > neg
"""
a,b,c,d = self._process_inputs(X, constraints, bounds)
gamma = self.gamma
num_pos = len(a)
num_neg = len(c)
_lambda = np.zeros(num_pos + num_neg)
lambdaold = np.zeros_like(_lambda)
gamma_proj = 1. if gamma is np.inf else gamma/(gamma+1.)
pos_bhat = np.zeros(num_pos) + self.bounds_[0]
neg_bhat = np.zeros(num_neg) + self.bounds_[1]
pos_vv = self.X_[a] - self.X_[b]
neg_vv = self.X_[c] - self.X_[d]
A = self.A_
for it in xrange(self.max_iter):
# update positives
for i,v in enumerate(pos_vv):
wtw = v.dot(A).dot(v) # scalar
alpha = min(_lambda[i], gamma_proj*(1./wtw - 1./pos_bhat[i]))
_lambda[i] -= alpha
beta = alpha/(1 - alpha*wtw)
pos_bhat[i] = 1./((1 / pos_bhat[i]) + (alpha / gamma))
Av = A.dot(v)
A += np.outer(Av, Av * beta)
# update negatives
for i,v in enumerate(neg_vv):
wtw = v.dot(A).dot(v) # scalar
alpha = min(_lambda[i+num_pos], gamma_proj*(1./neg_bhat[i] - 1./wtw))
_lambda[i+num_pos] -= alpha
beta = -alpha/(1 + alpha*wtw)
neg_bhat[i] = 1./((1 / neg_bhat[i]) - (alpha / gamma))
Av = A.dot(v)
A += np.outer(Av, Av * beta)
normsum = np.linalg.norm(_lambda) + np.linalg.norm(lambdaold)
if normsum == 0:
conv = np.inf
break
conv = np.abs(lambdaold - _lambda).sum() / normsum
if conv < self.convergence_threshold:
break
lambdaold = _lambda.copy()
if self.verbose:
print('itml iter: %d, conv = %f' % (it, conv))
if self.verbose:
print('itml converged at iter: %d, conv = %f' % (it, conv))
self.n_iter_ = it
return self
def metric(self):
return self.A_
class ITML_Supervised(ITML):
"""Information Theoretic Metric Learning (ITML)"""
def __init__(self, gamma=1., max_iter=1000, convergence_threshold=1e-3,
num_labeled=np.inf, num_constraints=None, bounds=None, A0=None,
verbose=False):
"""Initialize the supervised version of `ITML`.
    `ITML_Supervised` creates pairs of similar samples by taking same class
samples, and pairs of dissimilar samples by taking different class
samples. It then passes these pairs to `ITML` for training.
Parameters
----------
gamma : float, optional
value for slack variables
max_iter : int, optional
convergence_threshold : float, optional
num_labeled : int, optional (default=np.inf)
number of labeled points to keep for building pairs. Extra
labeled points will be considered unlabeled, and ignored as such.
Use np.inf (default) to use all labeled points.
num_constraints: int, optional
number of constraints to generate
bounds : list (pos,neg) pairs, optional
bounds on similarity, s.t. d(X[a],X[b]) < pos and d(X[c],X[d]) > neg
A0 : (d x d) matrix, optional
initial regularization matrix, defaults to identity
verbose : bool, optional
if True, prints information while learning
"""
ITML.__init__(self, gamma=gamma, max_iter=max_iter,
convergence_threshold=convergence_threshold,
A0=A0, verbose=verbose)
self.num_labeled = num_labeled
self.num_constraints = num_constraints
self.bounds = bounds
def fit(self, X, y, random_state=np.random):
"""Create constraints from labels and learn the ITML model.
Parameters
----------
X : (n x d) matrix
Input data, where each row corresponds to a single instance.
y : (n) array-like
Data labels.
random_state : numpy.random.RandomState, optional
If provided, controls random number generation.
"""
X, y = check_X_y(X, y)
num_constraints = self.num_constraints
if num_constraints is None:
num_classes = len(np.unique(y))
num_constraints = 20 * num_classes**2
c = Constraints.random_subset(y, self.num_labeled,
random_state=random_state)
pos_neg = c.positive_negative_pairs(num_constraints,
random_state=random_state)
return ITML.fit(self, X, pos_neg, bounds=self.bounds)
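# --- Illustrative usage sketch (added by the editor, not part of metric-learn) ---
# A minimal end-to-end example: learn a Mahalanobis metric from class labels on a
# tiny synthetic dataset. The data and hyper-parameter values below are arbitrary.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  X = np.vstack([rng.randn(20, 3), rng.randn(20, 3) + 2.0])  # two loose clusters
  y = np.array([0] * 20 + [1] * 20)
  itml = ITML_Supervised(num_constraints=60)
  itml.fit(X, y, random_state=rng)
  print(itml.metric())  # learned (d x d) Mahalanobis matrix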
|
mit
|
janscience/thunderfish
|
thunderfish/pulsetracker.py
|
3
|
41161
|
"""
by Dexter Frueh
"""
import sys
import numpy as np
import copy
from scipy import stats
from scipy import signal
from scipy import optimize
import matplotlib
#from fish import ProgressFish
import matplotlib.pyplot as plt
from thunderfish.dataloader import open_data
from thunderfish.eventdetection import detect_peaks
from scipy.interpolate import interp1d
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from collections import deque
import ntpath
import time
import os
from shutil import copy2
from collections import OrderedDict
def makeeventlist(main_event_positions,side_event_positions,data,event_width=20):
"""
Generate array of events that might be EODs of a pulse-type fish, using the location of peaks and troughs,
the data and an optional width of an supposed EOD-event.
The generated event-array contains location and height of such events.
    The height of each event is calculated from its height difference to the nearby side events; main events that have no side event within event_width are discarded and not considered as EOD events.
Parameters
----------
main_event_positions: array of int or float
Positions of the detected main events in the data time series. Either peaks or troughs.
side_event_positions: array of int or float
        Positions of the detected side events in the data time series. The complementary event to the main events.
data: array of float
The given data.
    event_width: int or float, optional
        Maximum distance (in samples) between a main event and its side events for the main event to be accepted as an EOD event.
Returns
-------
EOD_events: ndarray
2D array containing data with 'np.float' type, size (number_of_properties = 3, number_of_events).
Generated and combined data of the detected events in an array with arrays of x, y and height along the first axis.
"""
    mainfirst = int((min(main_event_positions[0],side_event_positions[0])<side_event_positions[0])) # determines if there is a peak or a trough first. Evaluates to 1 if there is a peak first.
main_x = main_event_positions
main_y = data[main_event_positions]
    # empty placeholders, filled in the next step while iterating over the properties of the single main events
main_h = np.zeros(len(main_event_positions))
main_real = np.ones(len(main_event_positions))
    # iteration over the properties of the single main events
for ind,(x, y, h, r) in enumerate(np.nditer([main_x, main_y, main_h, main_real], op_flags=[["readonly"],['readonly'],['readwrite'],['readwrite']])):
l_side_ind = ind - mainfirst
r_side_ind = l_side_ind + 1
try:
r_side_x = side_event_positions[r_side_ind]
r_distance = r_side_x - x
r_side_y = data[r_side_x]
except:
pass
try:
l_side_x = side_event_positions[l_side_ind]
l_distance = x - l_side_x
l_side_y = data[l_side_x]
except:
pass # ignore left or rightmost events which throw IndexError
# calculate distances to the two side events next to the main event and mark all events where the next side events are not closer than maximum event_width as unreal. If an event might be an EOD, then calculate its height.
if l_side_ind >= 0 and r_side_ind < len(side_event_positions):
if min((l_distance),(r_distance)) > event_width:
r[...] = False
elif max((l_distance),(r_distance)) <= event_width:
h[...] = max(abs(y-l_side_y),abs(y-r_side_y)) #calculated using absolutes in case of for example troughs instead of peaks as main events
else:
if (l_distance)<(r_distance): # evaluated only when exactly one side event is out of reach of the event width. Then the closer event will be the correct event
h[...] = abs(y-l_side_y)
else:
h[...] = abs(y-r_side_y)
# check corner cases
elif l_side_ind == -1:
if r_distance > event_width:
r[...] = False
else:
h[...] = y-r_side_y
elif r_side_ind == len(side_event_positions):
if l_distance> event_width:
r[...] = False
else:
h[...] = y-l_side_y
# generate return array and discard all events that are not marked as real
EOD_events = np.array([main_x, main_y, main_h], dtype = np.float)[:,main_real==1]
return EOD_events
def discardnearbyevents(event_locations, event_heights, min_distance):
"""
Given a number of events with given location and heights, returns a selection
of these events where no event is closer than eventwidth to the next event.
Among neighboring events closer than eventwidth the event with smaller height
is discarded.
Used to discard sidepeaks in detected multiple peaks of single EOD-pulses and
only keep the largest event_height and the corresponding location as
representative of the whole EOD pulse.
Parameters
----------
event_locations: array of int or float
Positions of the given events in the data time series.
event_heights: array of int or float
Heights of the given events, indices refer to the same events as in
event_locations.
min_distance: int or float
minimal distance between events before one of the events gets discarded.
Returns
-------
event_locations: array of int or float
Positions of the returned events in the data time series.
event_heights: array of int or float
Heights of the returned events, indices refer to the same events as in
event_locations.
"""
unchanged = False
counter = 0
    event_indices = np.arange(0,len(event_locations),1)
while unchanged == False:# and counter<=200:
x_diffs = np.diff(event_locations)
events_delete = np.zeros(len(event_locations))
for i, diff in enumerate(x_diffs):
if diff < min_distance:
if event_heights[i+1] > event_heights[i] :
events_delete[i] = 1
else:
events_delete[i+1] = 1
event_heights = event_heights[events_delete!=1]
event_locations = event_locations[events_delete!=1]
event_indices = event_indices[np.where(events_delete!=1)[0]]
if np.count_nonzero(events_delete)==0:
unchanged = True
counter += 1
if counter > 2000:
            print('Warning: unusually many discarding steps needed, unusually dense events')
pass
return event_indices, event_locations, event_heights
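# --- illustrative sketch (added for documentation, not part of the original module) ---
# Minimal example of discardnearbyevents(): the events at positions 100 and 105 are
# closer than min_distance=20, so the smaller one (at 100) is dropped. Values are arbitrary.
def _example_discardnearbyevents():
    locations = np.array([100, 105, 400])
    heights = np.array([1.0, 2.0, 1.5])
    return discardnearbyevents(locations, heights, 20)
    # -> (array([1, 2]), array([105, 400]), array([2. , 1.5]))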
def crosscorrelation(sig, data):
'returns crosscorrelation of two arrays, the first array should have a length equal to or smaller than the second array.'
return signal.fftconvolve(data, sig[::-1], mode='valid')
def interpol(data, kind):
"""
interpolates the given data using scipy interpolation python package
Parameters
----------
data: array
kind: string or int
(‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’), or integer of order of spline interpolation to be used
Returns
-------
interpolation: function
"""
width = len(data)
x = np.linspace(0, width-1, num = width, endpoint = True)
#return interp1d(x, data[0:width], kind, assume_sorted=True)
return interp1d(x, data[0:width], kind)
def interpolated_array(data, kind, int_fact):
"""
returns an interpolated array of the given dataarray.
Parameters
----------
data: array
kind: string or int
(‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’), or integer of order of spline interpolation to be used
int_fact: int
factor by which the interpolated array is larger than the original array
Returns
-------
interpolated array: array
"""
return interpol(data,kind)(np.arange(0, len(data)-1, 1/int_fact))
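# --- illustrative sketch (added for documentation, not part of the original module) ---
# Example of the interpolation helpers above: a 5-point snippet interpolated by a factor
# of 10 yields 40 points on the same support. Snippet values are arbitrary.
def _example_interpolated_array():
    snippet = np.array([0.0, 1.0, 0.5, -0.5, 0.0])
    dense = interpolated_array(snippet, 'cubic', 10)
    return dense.shape  # (40,) since (len(snippet) - 1) * 10 = 40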
def cut_snippets(data,event_locations,cut_width,int_met="linear",int_fact=10,max_offset = 1000000):
"""
cuts intervals from a data array, interpolates and aligns them and returns them in a list
TODO: ALIGN THEM TO CAUSE LEAST SQUARE ERROR
Parameters
----------
data: array
event_locations: array
cut_width: [int, int]
lower and upper limit of the intervals relative to the event locations.
f.e. [-15,15] indicates an interval of 30 datapoints around each event location
int_met: string or int
method of interpolation. (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’), or integer of order of spline interpolation to be used
int_fact: int
factor by which the interpolated array is larger than the original
max_offset: float
maximal offset by which the interpolated intervals can be moved to be aligned with each other. offset relative to the datapoints of the original data.
Returns
-------
aligned_snips: twodimensional nparray
the processed intervals (interval#,intervallen)
"""
snippets = []
heights = np.zeros(len(event_locations))
cut_width = [-cut_width, cut_width]
#alignwidth = int(np.ceil((max_offset) * int_fact))
alignwidth = 50
for pos in event_locations.astype('int'):
snippets.append(data[pos+cut_width[0]:pos+cut_width[1]])
ipoled_snips = np.empty((len(snippets), (cut_width[1]-cut_width[0])*int_fact-int_fact))
for i, snip in enumerate(snippets):
if len(snip) < ((cut_width[1]-cut_width[0])):
if i == 0:
snip = np.concatenate([np.zeros([((cut_width[1]-cut_width[0]) - len(snip))]),np.array(snip)])
if i == len(snippets):
snip = np.concatenate([snip, np.zeros([((cut_width[1]-cut_width[0])-len(snip))])])
else:
snip = np.zeros([(cut_width[1]-cut_width[0])])
#f_interpoled = interpol(snip, int_met) #if len(snip) > 0 else np.zeros([(cut_width[1]-cut_width[0]-1)*int_fact ])
interpoled_snip = interpolated_array(snip, int_met, 10)#f_interpoled(np.arange(0, len(snip)-1, 1/int_fact))
intsnipheight = np.max(interpoled_snip) - np.min(interpoled_snip)
if intsnipheight == 0:
intsnipheight = 1
interpoled_snip = (interpoled_snip - max(interpoled_snip))* 1/intsnipheight
ipoled_snips[i] = interpoled_snip
mean = np.mean(ipoled_snips, axis = 0)
aligned_snips = np.empty((len(snippets), (cut_width[1]-cut_width[0])* int_fact-(2*alignwidth)-int_fact))
for i, interpoled_snip in enumerate(ipoled_snips):
cc = crosscorrelation(interpoled_snip[alignwidth:-alignwidth], mean)
#cc = crosscorrelation(interpoled_snip[15 + 10*-cut_width[0]-10*7:-15+ -10*cut_width[1]+ 31], mean[10*-cut_width[0]-10*7:-10*cut_width[1]+31])
offset = -alignwidth + np.argmax(cc)
aligned_snip = interpoled_snip[alignwidth-offset:-alignwidth-offset] if offset != -alignwidth else interpoled_snip[2*alignwidth:]
if len(aligned_snip[~np.isnan(aligned_snip)])>0:
aligned_snips[i] = aligned_snip
try:
heights[i] = np.max(interpoled_snip[alignwidth-offset:-alignwidth-offset]) - np.min(interpoled_snip[alignwidth-offset:-alignwidth-offset])
except:
heights[i] = np.max(interpoled_snip[2*alignwidth:]) - np.min(interpoled_snip[2*alignwidth:])
return aligned_snips, heights
def pc(dataset):
"""
Calculates the principal components of a dataset using the python module scikit-learn's principal component analysis
Parameters
----------
dataset: ndarray
dataset of which the principal components are to be calculated.
twodimensional array of shape (observations, features)
Returns
-------
pc_comp: ndarray
principal components of the dataset
"""
pca = PCA(n_components=10)
pc_comp = pca.fit_transform(dataset)
return pc_comp #, pca
def chebyshev(dataset):
x = range(len(dataset[0]))
npol=5
p = np.zeros((len(dataset),npol+1))
for i,s in enumerate(dataset):
cheb = np.polynomial.chebyshev.Chebyshev.fit(x,s,npol)
p[i] = cheb.coef
return p #, pca
def dbscan(pcs, events, eps, min_samples, takekm):
"""
improve description, add parameter and returns
calculates clusters of high spatial density of the given observations in their feature space.
#For example, the first few principal components of the data could be used as features for the classification.
Parameters
----------
pcs: ndarray
%TODO
shape(samples, features)
...
Returns
-------
labels: ndarray
labels of the clusters of each observation
"""
# pcs (samples, features)
# X (samples, features)
#try:
# X = pcs[:,:order]
#except:
# X = pcs[:,order]
X = pcs
# #############################################################################
# Compute DBSCAN
clusters = DBSCAN(eps, min_samples).fit(X)
labels = clusters.labels_
comp = clusters.components_
comp_means = np.zeros((len(np.unique(labels)) - 1,comp.shape[1]))
for i in range(len(np.unique(labels)) - 1):
comp_means[i] = np.mean(pcs[labels==i],axis=0)
return labels, comp_means
def cluster_events(features, events, eps, min_samples, takekm, method='DBSCAN'):
"""F
clusters the given events using the given feature space and the clustering algorithm of choice and appends the assigned cluster number to the event's properties.
Parameters
----------
Returns
-------
"""
    ######################## function could maybe be even more generic (dependent on the datatype of "events")
if method == 'DBSCAN':
labels, clusters = dbscan(features,events, eps, min_samples, takekm)
elif method == 'kMean':
pass
# To be implemented
#labels = kmeans([])
return labels, clusters
class Peaklist(object):
def __init__(self, peaklist):
self.list = peaklist
self.lastofclass = {}
self.lastofclassx = {}
self.classesnearby = []
self.classesnearbyx = []
self.classesnearbypccl = []
self.classlist = []
self.classamount = 0
self.shapes = {}
def connect_blocks(oldblock):
"""
used to connect blocks.
transfers data from the previous analysis block to the current block
"""
newblock = Peaklist([])
newblock.lastofclass = oldblock.lastofclass
newblock.lastofclassx = oldblock.lastofclassx
newblock.classesnearby = oldblock.classesnearby
newblock.classesnearbypccl = oldblock.classesnearbypccl
newblock.classesnearbyx = [clnearbyx - oldblock.len for clnearbyx in oldblock.classesnearbyx]
newblock.classamount = oldblock.classamount
newblock.len = oldblock.len
return newblock
def alignclusterlabels(labels, peaklist, peaks, data='test'):
"""
used to connect blocks.
changes the labels of clusters in the current block to fit with the labels of the previous block
"""
# take first second of new peak data
overlapamount = len(peaks[0,peaks[0]<30000])
if overlapamount == 0:
return None
old_peaklist = copy.deepcopy(peaklist) #redundant
overlappeaks = copy.deepcopy(peaks[:,:overlapamount])
overlap_peaklist = copy.deepcopy(old_peaklist)
# delete cluster classifications of the overlap class
overlappeaks[3]=[-1]*len(overlappeaks[0])
# set nearby pc classes to -1
overlap_peaklist.classesnearbypccl = [-1]*len(overlap_peaklist.classesnearbypccl)
# create peak labels using ampwalk classifier
classified_overlap = ampwalkclassify3_refactor(overlappeaks,overlap_peaklist,glue=True)[0]
# for each class
for cl in np.unique(classified_overlap[4]):
# indexes of the peaks with current class by ampwalk classification
labelindex = np.where(classified_overlap[4] == cl)[0]
# pc clustering labels that were originally given to those peaks
label = labels[labelindex]
# index of a peak with the most common translation from ampwalk class to pc clustering class
labelindex = labelindex[np.where(label == stats.mode(label)[0])[0][0]]
# pc clustering label belonging to the class cl in the new block
newlabel = labels[labelindex]
try:
peaklist.classesnearbypccl[old_peaklist.classesnearby.index(cl)] = newlabel
except:
pass
def ampwalkclassify3_refactor(peaks,peaklist,glue=False):
"""
    Classifies peaks/EOD events into different classes by their amplitude.
    Takes the list of peaks and the list of properties of the previous analysis block.
    Classifies the single peaks in the order of their occurrence in time, based on their amplitude and
    their previously assigned class based on their waveform (using the method cluster_events on the
    principal components of the snippets around the single peaks).
    Method:
    Calculates differences in amplitude between the current peak and nearby amplitude classes.
    Creates a new amplitude class if no class is close enough, but creates no new class if the peak's
    waveform class is a noise class of the DBSCAN algorithm. Peaks of different waveform classes are not compared.
    Can also be used without prior waveform classes, resulting in a classification based solely on the amplitude
    development; for this, the pc classes need to be set to the same class beforehand. Not practical, but it should
    be possible to split this up into more general functions.
"""
classamount = peaklist.classamount
lastofclass = peaklist.lastofclass
lastofclassx = peaklist.lastofclassx
a=0
elem = 0
thresholder = []
comperr = 1
classesnearby = peaklist.classesnearby
classesnearbyx = peaklist.classesnearbyx
classesnearbypccl = peaklist.classesnearbypccl
classes = np.zeros((len(peaks[0])))
pcclasses = peaks[3]
positions = peaks[0]
heights = peaks[2]
cl = 0
maxdistance = 30000 # Max distance to possibly belong to the same class
    factor = 1.6 # factor by which a peak fits into a class, e.g. classheight = 1, factor = 2 => peaks accepted in range (0.5, 2)
c=0
# loop through all the new peaks
for peaknum, p in enumerate(peaks.T):
if len(lastofclass) == 0:
lastofclass[1] = deque()
lastofclassx[1] = deque()
lastofclass[1].append(heights[peaknum])
lastofclassx[1].append(positions[peaknum])
classesnearby.append(1)
classesnearbyx.append(-1)
classesnearbypccl.append(pcclasses[peaknum])
classes[peaknum] = 1
classamount += 1
continue
time1 = time.time()
# classes nearby only count if they are within maxdistance
for i, cl in enumerate(classesnearby):
if (positions[peaknum] - classesnearbyx[i]) > maxdistance:
classesnearby.pop(i)
classesnearbyx.pop(i)
classesnearbypccl.pop(i)
# compute mean isi of a class by taking the last 3 pulses in that class
lastofclassisis = []
for i in classesnearby:
lastofclassisis.append(np.median(np.diff(lastofclassx[i])))
meanisi = np.mean(lastofclassisis)
# stop adding to a class if 40 isis have passed
if 32000 > 40*meanisi> 6000:
maxdistance = 20*meanisi
cl = 0 # 'No class'
comperr = 100
clnrby = np.unique(classesnearby)
last_err = 1000
        # TODO this assigns peaks with no class to the last class if there are multiple candidates.
# can I fix this?
for i in clnrby:
# if the class of the current peak is equal to the current evaluated class or current peak has no class
if classesnearbypccl[classesnearby.index(i)] == pcclasses[peaknum]: #or glue==True: #pcclasses[peaknum] == -1: or
classmean = np.mean(lastofclass[i]) #mean hight of class
                # difference between current peak height and average class height
logerror = np.abs(np.log2(heights[peaknum])-np.log2(classmean))
logthresh = np.log2(factor)
relerror = logerror
                # if the peak heights are similar
if logerror < logthresh: ## SameClass-Condition
# if the peaks are close together in distance (20*isi)
if relerror < comperr and (positions[peaknum]-classesnearbyx[classesnearby.index(i)])<maxdistance:
# keep the same class (or in case of no class assign that class)
cl = i
comperr = relerror
time2 = time.time()
time1 = time.time()
# if a pc class is assigned to the peak
if pcclasses[peaknum] != -1:
# if the class is kept
if cl != 0 :
# append this peak to the peaklist for the right class (only keep last 3 peaks)
if len(lastofclass[cl]) >= 3:
lastofclass[cl].popleft()
if len(lastofclassx[cl]) >= 3:
lastofclassx[cl].popleft()
lastofclass[cl].append(heights[peaknum])
lastofclassx[cl].append(positions[peaknum])
classes[peaknum] = cl
else:
# if the class if not the same as any of the existing classes, create new class
cl = classamount+1
classamount = cl
lastofclass[cl] = deque()
lastofclassx[cl] = deque()
lastofclass[cl].append(heights[peaknum])
lastofclassx[cl].append(positions[peaknum])
classes[peaknum] = cl
classesnearby.append(cl)
classesnearbyx.append(positions[peaknum])
classesnearbypccl.append(pcclasses[peaknum])
# if there are more than 12 classes, delete the class that is furthest away in proximity
if len(classesnearby) >= 12: #kacke implementiert?
minind = classesnearbyx.index(min(classesnearbyx))
del lastofclass[classesnearby[minind]]
del lastofclassx[classesnearby[minind]]
classesnearby.pop(minind)
classesnearbyx.pop(minind)
classesnearbypccl.pop(minind)
# add position and class to peaklist
try:
ind=classesnearby.index(cl)
classesnearbyx[ind] = positions[peaknum]
except ValueError:
classesnearby.append(cl)
classesnearbyx.append(positions[peaknum])
classesnearbypccl.append(pcclasses[peaknum])
# if no pc class is assigned to the peak
elif glue == True:
# if a class is assigned through the peak amp method
if cl != 0:
# add this class to the peak
classes[peaknum] = cl
else:
# create new class
cl = classamount+1
classamount = cl
lastofclass[cl] = deque()
lastofclassx[cl] = deque()
lastofclass[cl].append(heights[peaknum])
lastofclassx[cl].append(positions[peaknum])
classes[peaknum] = cl
classesnearby.append(cl)
classesnearbyx.append(positions[peaknum])
classesnearbypccl.append(pcclasses[peaknum])
if len(classesnearby) >= 12: #kacke implementiert?
minind = classesnearbyx.index(min(classesnearbyx))
del lastofclass[classesnearby[minind]]
del lastofclassx[classesnearby[minind]]
classesnearby.pop(minind)
classesnearbyx.pop(minind)
classesnearbypccl.pop(minind)
try:
ind=classesnearby.index(cl)
classesnearbyx[ind] = positions[peaknum]
except ValueError:
classesnearby.append(cl)
classesnearbyx.append(positions[peaknum])
classesnearbypccl.append(pcclasses[peaknum])
peaklist.lastofclass = lastofclass
peaklist.lastofclassx = lastofclassx
peaklist.classesnearby = classesnearby
peaklist.classesnearbyx = classesnearbyx
peaklist.classesnearbypccl = classesnearbypccl
peaklist.classlist = classes # np.vectorize(lambda peak: peak.cl, otypes=[object])(peaklist.list)
peaklist.classamount = classamount
peaks = np.append(peaks,classes[None,:], axis = 0)
return peaks, peaklist
def discard_wave_pulses(peaks, data):
"""
    discards events from a pulse_event list which are unusually wide (wider than a tenth of the inter pulse interval), which indicates a wave-type EOD instead of a pulse type
"""
deleteclasses = []
for cl in np.unique(peaks[3]):
peaksofclass = peaks[:,peaks[3] == cl]
isi = np.diff(peaksofclass[0])
isi_mean = np.mean(isi)
widepeaks = 0
isi_tenth_area = lambda x, isi : np.arange(np.floor(x-0.1*isi),np.ceil(x+0.1*isi),1, dtype = np.int)
for p in peaksofclass.T:
data = np.array(data)
try:
for dp_around in data[isi_tenth_area(p[0],isi_mean)]:
if dp_around <= p[1]-p[2]:
break
except (IndexError,ValueError) as e:
pass
else:
widepeaks+=1
if widepeaks > len(peaksofclass)*0.5:
deleteclasses.append(cl)
for cl in deleteclasses:
peaks = peaks[:,peaks[3]!=cl]
return peaks
def plot_events_on_data(peaks, data, colors):
"""
plots the detected events onto the data timeseries. If the events are classified, the classes are plotted in different colors and the class -1 (not belonging to a cluster) is plotted in black
"""
plt.plot(range(len(data)),data, color = 'black')
if len(peaks)>3:
classlist = np.array(peaks[3],dtype=np.int)
if len(peaks) > 4:
classlist = np.array(peaks[4],dtype=np.int)
#classlist=labels
cmap = plt.get_cmap('jet')
for cl in np.unique(classlist):
if cl == -1:
color = 'black'
else:
color = colors[cl]
peaksofclass = peaks[:,classlist == cl]
plt.plot(peaksofclass[0],peaksofclass[1], '.', color = color, ms =20, label=cl)
plt.legend()
else:
plt.scatter(peaks[0],peaks[1], color = 'red')
plt.show()
plt.close()
def discard_short_classes(events, minlen):
"""
returns all events except those belonging to classes with fewer than minlen members
"""
u, c = np.unique(events[-1],return_counts=True)
smallclasses = u[c<minlen]
classlist = events[-1]
delete = np.zeros(len(classlist))
for cl in smallclasses:
delete[classlist == cl] = 1
events = events[:,delete != 1]
return events
def path_leaf(path):
ntpath.basename("a/b/c")
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def save_EOD_events_to_npmmp(EOD_Events,eods_len,startblock,datasavepath,mmpname='eods.npmmp'):
n_EOD_Events = len(EOD_Events[0])
savepath = datasavepath+"/"+mmpname
if startblock:
eods = np.memmap(savepath,
dtype='float64', mode='w+',
shape=(4,n_EOD_Events), order = 'F')
else:
dtypesize = 8  # bytes per value: float64 is 8 bytes (was 4 bytes when float32 was used)
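# The byte offset below skips the 4 rows x eods_len float64 values written by previous
# blocks, so the new block of events is appended to the existing memmap (Fortran order).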
eods = np.memmap(savepath, dtype=
'float64', mode='r+', offset = dtypesize*eods_len*4,
shape=(4,n_EOD_Events), order = 'F')
eods[:] = EOD_Events
def create_threshold_array(data,window,threshold):
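# Builds a per-sample threshold: for each non-overlapping window the threshold is four
# times the standard deviation of the data in that window, floored at the given minimum.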
thr_array = np.zeros(data.shape)
for i in range(int(len(data)/window)):
thr_array[i*window:(i+1)*window] = np.std(data[i*window:(i+1)*window])*4
thr_array[thr_array<threshold] = threshold
return thr_array
def alignlabels(labels,clusters,old_labels,old_clusters,maxlabel):
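# Matches the cluster labels of the current block to those of the previous block: a new
# cluster inherits the label of the closest previous cluster centre if its Euclidean
# distance is below 0.1, otherwise it receives a fresh label counting up from maxlabel.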
old_labels = old_labels[old_labels!=-1]
labels_new = -1*np.ones(labels.shape)
newclass = maxlabel
for curlabel, cluster in enumerate(clusters):
n = np.linalg.norm(old_clusters-cluster,axis=1)
if np.min(n) < 0.1:
labels_new[labels==curlabel] = old_labels[np.argmin(n)]
else:
labels_new[labels==curlabel] = newclass
newclass = newclass + 1
return labels_new
def analyze_pulse_data(filepath, deltat=10, thresh=0.04, starttime = 0, endtime = 0, cluster_thresh = 0.1, savepath = False,save=False, npmmp = False, plot_eods=False,plot_features=False,plot_steps=False, plot_result=False):
"""
analyzes timeseries of a pulse fish EOD recording
Parameters
----------
filepath: WAV-file with the recorded timeseries
deltat: int, optional
time for a single analysisblock (recommended less than a minute, due to principal component clustering on the EOD-waveforms)
thresh: float, optional
minimum threshold for the peak detection (if computing frequencies, it is recommended to set this slightly lower than the desired threshold and to discard the EODs below the desired threshold after the frequencies have been computed for each EOD.)
starttime: int or str of int, optional
time into the data from where to start the analysis, seconds.
endtime: int or str of int, optional
time into the data where to end the analysis, seconds, larger than starttime.
cluster_thresh: float, optional
threshold that decides the cluster density of the EOD waveform features.
savepath: Boolean or str, optional
path to where to save results and intermediate results, only needed if save or npmmp is True.
string to specify a relative path to the directory where results and intermediate results will be saved,
or False to use the preset savepath, which is ~/filepath/,
or True to specify the savepath as an input while the script is running
save: Boolean, optional
True to save the results into a npy file at the savepath
npmmp: Boolean, optional
True to save intermediate results into a npmmp at the savepath, only recommended in case of memory overflow
plot_steps: Boolean, optional
True to plot the results of each analysis block
plot_result: Boolean, optional
True to plot the results of the final analysis. Not recommended for long recordings due to %TODO
plot_eods: Boolean, optional
True to plot the EOD waveforms for each analysis block
plot_features: Boolean, optional
True to plot the EOD waveform features for each analysis block
Returns
-------
eods: numpy array
2D numpy array. first axis: attributes of an EOD (x (datapoints), y (recorded voltage), height (difference from maximum to minimum), class), second axis: EODs in chronological order.
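Example
-------
A minimal usage sketch (the file name below is a placeholder, not from the original source):
>>> eods = analyze_pulse_data("recording.wav", deltat=10, thresh=0.04, save=False)
The rows of the returned array hold x, y, height and class; the columns are the EODs in chronological order.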
"""
# parameters for the analysis
thresh = 0.04 # minimal threshold for peakdetection
peakwidth = 20 # width of a peak and minimal distance between two EODs
# basic parameters for thunderfish.dataloader.open_data
verbose = 0
channel = 0
ultimate_threshold = thresh+0.01
startblock = 0
starttime = int(starttime)
endtime = int(endtime)
timegiven = False
if endtime > starttime>=0:
timegiven = True
peaks = np.array([])
troughs = np.array([])
filename = path_leaf(filepath)
eods_len = 0
if savepath==False:
datasavepath = filename[:-4]
elif savepath==True:
datasavepath = input('Please specify a relative path to the directory where results should be saved: ').lower()
else: datasavepath=savepath
if save and (os.path.exists(datasavepath+"/eods8_"+filename[:-3]+"npy") or os.path.exists(datasavepath+"/eods5_"+filename[:-3]+"npy")):
print('there already exists an analyzed file, aborting. Change the code if you don\'t want to abort')
quit()
if npmmp:
#proceed = input('With the option npmmp enabled, a numpy memmap will be saved to ' + datasavepath + '. continue? [y/n] ').lower()
proceed = 'y'
if proceed != 'y':
quit()
# starting analysis
with open_data(filepath, channel, deltat, 0.0, verbose) as data:
samplerate = data.samplerate
# selected time interval
if timegiven == True:
parttime1 = starttime*samplerate
parttime2 = endtime*samplerate
data = data[parttime1:parttime2]
#split data into blocks
nblock = int(deltat*samplerate)
if len(data)%nblock != 0:
blockamount = len(data)//nblock + 1
else:
blockamount = len(data)//nblock
#fish = ProgressFish(total = blockamount)
pca_cur = 0
progress = 0
for idx in range(0, blockamount):
print('BLOCK %i/%i'%(idx+1,blockamount))
blockdata = data[idx*nblock:(idx+1)*nblock]
if progress < (idx*100 //blockamount):
progress = (idx*100)//blockamount
progressstr = ' Filestatus: '
# fish.animate(amount = idx, dexextra = progressstr)
# delete peaks under absolute threshold
#thresh_array = create_threshold_array(blockdata,30000,thresh)
pk, tr = detect_peaks(blockdata, thresh)
troughs = tr
if len(pk) > 3:
peaks = makeeventlist(pk,tr,blockdata,peakwidth)
peakindices, peakx, peakh = discardnearbyevents(peaks[0],peaks[1],peakwidth)
peaks = peaks[:,peakindices]
if len(peaks) > 0:
#if idx > startblock:
# # adding a new block as copy of old list, only difference is peak indexing as it refers to last block
# peaklist = connect_blocks(peaklist)
#else:
# peaklist = Peaklist([])
aligned_snips, snip_heights = cut_snippets(blockdata,peaks[0], 30, int_met = "cubic", int_fact = 10,max_offset = 20)
pols = chebyshev(aligned_snips)
feats = np.zeros((pols.shape[0],pols.shape[1]+1))
feats[:,:6] = pols
feats[:,-1] = snip_heights*0.1
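# Feature vector per EOD: the first six Chebyshev coefficients of the aligned snippet
# plus the down-scaled snippet height as the last entry (this assumes chebyshev()
# returns exactly six coefficients per snippet).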
#pcs, pca_cur = pc(aligned_snips) #pc_refactor(aligned_snips)
minpeaks = 3 if deltat < 2 else 10
labels, clusters = cluster_events(feats, peaks, cluster_thresh, minpeaks, False, method = 'DBSCAN')
peaks = np.append(peaks,[labels], axis = 0)
if idx > startblock:
# instead of the peaklist I would have to add the previous cluster means
# alignclusterlabels(labels, peaklist, peaks,data=blockdata)
peaks[-1] = alignlabels(labels,clusters,old_labels,old_clusters,maxlabel)
old_labels = np.unique(peaks[-1])
old_clusters = clusters
#I would want peaks updated here to have the right pc classes as well..
#peaks, peaklist = ampwalkclassify3_refactor(peaks, peaklist) # classification by amplitude
minlen = 5
peaks = discard_short_classes(peaks, minlen)
if len(peaks[0]) > 0:
peaks = discard_wave_pulses(peaks, blockdata)
# delete peaks under absolute threshold
#thresh_array = create_threshold_array(blockdata,30000)
#peaks = peaks[:,peaks[1]>thresh_array[list(map(int,peaks[0]))]]
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1.0, 10))
if plot_steps == True:
plot_events_on_data(peaks, blockdata, colors)
pass
for lab in np.unique(labels):
if lab == -1:
c = 'k'
z=-1
else:
c=colors[lab]
z=1
if plot_eods==True:
plt.plot(range(aligned_snips.shape[1]),np.transpose(aligned_snips[labels == lab]),color=c,zorder=z,label=lab)
if plot_eods==True:
plt.title('Detected and classified EODs')
plt.xlabel('time [ms]')
plt.ylabel('signal (normalized)')
phandles, plabels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(plabels, phandles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
for lab in np.unique(labels):
if lab == -1:
c = 'k'
z = -1
else:
c = colors[lab]
z=1
if plot_features==True:
plt.plot(np.squeeze(np.transpose(feats[labels == lab])),color=c,zorder=z,label=lab)
if plot_features==True:
plt.title('EOD Features')
plt.xlabel('feature [#]')
plt.ylabel('value [a.u.]')
phandles, plabels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(plabels, phandles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
#peaklist.len = nblock
worldpeaks = np.copy(peaks)
worldpeaks[0] = worldpeaks[0] + (idx*nblock)
# delete the classification that only considers wave shape.
#thisblock_eods = np.delete(worldpeaks,3,0)
thisblock_eods = worldpeaks
if idx == startblock:
maxlabel = np.max(peaks[-1]) + 1
else:
maxlabel = np.max([maxlabel, (np.max(peaks[-1]) + 1)])
if npmmp:
if idx == startblock:
if not os.path.exists(datasavepath):
os.makedirs(datasavepath)
mmpname = "eods_"+filename[:-3]+"npmmp"
# save the peaks of the current buffered part to a numpy-memmap on the disk
save_EOD_events_to_npmmp(thisblock_eods,eods_len,idx==startblock,datasavepath,mmpname)
eods_len += len(thisblock_eods[0])
else:
if idx > 0:
all_eods = np.concatenate((all_eods,thisblock_eods),axis = 1)
else:
all_eods = thisblock_eods
if plot_steps == True:
print('FINAL RESULTS')
plot_events_on_data(all_eods, data, colors)
#plot_events_on_data(all_eods,data)
print('returns analyzed EODs. Calculate frequencies using all of these, but discard the data from the EODs within the lowest few percent of amplitude')
if npmmp:
all_eods = np.memmap(datasavepath+'/'+mmpname, dtype='float64', mode='r+', shape=(4,eods_len), order = 'F')
if save == 1:
path = filename[:-4]+"/"
if not os.path.exists(path):
os.makedirs(path)
if eods_len > 0:
np.save(datasavepath+"/eods8_"+filename[:-3]+"npy", all_eods)
print('Saved!')
else:
print('not saved')
return all_eods
def main():
eods = analyze_pulse_data(sys.argv[1], save=True, npmmp=True)
print(eods)
if __name__ == '__main__':
main()
|
gpl-3.0
|
hunse/deepnet
|
examples/vh_deepautoencoder.py
|
1
|
6961
|
"""
Learn a single-layer sparse autoencoder on Van Hateren data
(as in CogSci 2013 paper)
"""
import sys, os, time, datetime
os.environ['THEANO_FLAGS'] = 'device=gpu, floatX=float32'
import theano
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
plt.ion()
import deepnet
import deepnet.autoencoder as auto
import deepnet.functions as func
import deepnet.image_tools as imtools
import skdata.vanhateren.dataset
data = skdata.vanhateren.dataset.Calibrated(50)
data.meta # accessing this forces data arrays to be built
N = 60000
S = 32
patches = data.raw_patches((N, S, S), items=data.meta[:data.n_item_limit])
patches = patches.astype('float32')
patch_shape = patches.shape[1:]
### intensities are essentially log-normally distributed. So take the log
patches = np.log1p(patches)
def normalize(p):
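# Per-patch standardization: subtract each patch's mean and divide by its standard
# deviation, flooring the divisor at 1% of the global std to avoid near-zero division.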
std0 = patches.std()
mean, std = p.mean(axis=(1,2)), p.std(axis=(1,2))
return ((p - mean[:,None,None]) / np.maximum(std, 0.01*std0)[:,None,None])
patches = normalize(patches)
patches = patches.clip(-3, 3)
# patches = (2*(patches > 0) - 1).astype('float32')
################################################################################
# loadfile = 'results/vh_tied.npz'
# loadfile = 'results/vh_binary.npz'
loadfile = 'results/vh_flatlif.npz'
if not os.path.exists(loadfile):
linear = func.Linear(slope=1.0)
# noisylif = func.NoisyLIFApprox(
# tRef=0.02, tauRC=0.06, alpha=10.0, xint=-0.5, amp=1./41, sigma=0.05)
noisylif = func.NoisyLIFApprox(
tRef=0.002, tauRC=0.05, alpha=0.7, xint=-0.5, amp=1./50, sigma=0.001)
# params = [(auto.SparseAutoencoder, (50, 50), {'rfshape': (9,9), 'f': noisylif, 'g': linear}),
# (auto.Autoencoder, (40, 40), {'f': noisylif, 'g': noisylif}),
# (auto.Autoencoder, (30, 30), {'f': noisylif, 'g': noisylif}),
# (auto.Autoencoder, (20, 20), {'f': linear, 'g': noisylif})]
params = [
#(auto.SparseAutoencoder, (50, 50), {'rfshape': (9,9), 'f': noisylif, 'g': linear}),
"results/vh_flatlif.npz.layer_0_2013-10-07_12:00:39.npz",
# (auto.Autoencoder, (40, 40), {'f': noisylif, 'g': noisylif}),
"results/vh_flatlif.npz.layer_1_2013-10-07_12:20:10.npz",
# (auto.Autoencoder, (30, 30), {'f': noisylif, 'g': noisylif}),
"results/vh_flatlif.npz.layer_2_2013-10-07_12:25:06.npz",
(auto.Autoencoder, (20, 20), {'f': linear, 'g': noisylif})]
# params = ['results/vh_flatlif.npz.layer_0_2013-10-04_16:07:47.npz',
# # (auto.Autoencoder, (40, 40), {'f': noisylif, 'g': noisylif}),
# # (auto.SparseAutoencoder, (40, 40), {'rfshape': (13, 13), 'f': noisylif, 'g': noisylif}),
# (auto.Autoencoder, (30, 30), {'f': noisylif, 'g': noisylif}),
# (auto.Autoencoder, (20, 20), {'f': linear, 'g': noisylif})]
# params = ['results/vh_layer.npz',
# # (auto.SparseAutoencoder, (40, 40), {'rfshape': (13, 13), 'f': noisylif, 'g': noisylif}),
# 'results/vh_tied.npz.layer_1.npz',
# # (auto.Autoencoder, (30, 30), {'f': noisylif, 'g': noisylif}),
# # 'results/vh_tied.npz.layer_2.npz',
# 'results/vh_tied.npz.layer_2_2013-10-02_13:33:09.npz',
# # (auto.Autoencoder, (20, 20), {'f': linear, 'g': noisylif})
# 'results/vh_tied.npz.layer_3_2013-10-02_13:36:00.npz'
# ]
layers = []
for param in params:
if isinstance(param, str):
# load from file
enc = deepnet.base.CacheObject.from_file(param)
else:
# make a new layer
visshape = patch_shape if len(layers) == 0 else layers[-1].hidshape
EncoderClass, hidshape, p = param
enc = EncoderClass(visshape=visshape, hidshape=hidshape, **p)
layers.append(enc)
net = auto.DeepAutoencoder(layers)
else:
net = auto.DeepAutoencoder.from_file(loadfile)
# assert 0
# sgd_epochs = [layer for layer in net.layers if layer.train_stats
def algo_epochs(layer, algo):
return sum([s['n_epochs'] for s in layer.train_stats
if s['algorithm'] == algo])
# sgd_params = [dict(n_epochs=30, rate=0.05, clip=(-1,1)) for i in net.layers]
sgd_params = [dict(n_epochs=30, rate=0.05, clip=(-1,1)),
dict(n_epochs=30, rate=0.05, clip=(-1,1)),
dict(n_epochs=30, rate=0.01, clip=(-1,1)),
dict(n_epochs=50, rate=0.005, clip=(-1,1))]
if any(algo_epochs(layer, 'sgd') < sgd_params[i]['n_epochs']
for i, layer in enumerate(net.layers)):
if imtools.display_available():
### set figure size and position
fig = plt.figure(101, figsize=(11.925, 12.425))
# figman = plt.get_current_fig_manager()
# figman.window.wm_geometry('954x1028+2880+0')
# fig.set_size_inches([11.925, 12.425])
images = patches[:]
timages = patches[:500]
# train_params = [{'rho': 0.01, 'lamb': 5, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 0.1, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 0, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 0, 'noise_std': 0.2}]
# train_params = [{'rho': 0.05, 'lamb': 5, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 1, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 0, 'noise_std': 0.2},
# {'rho': 0.05, 'lamb': 0, 'noise_std': 0.2}]
train_params = [{'rho': 0.01, 'lamb': 5, 'noise_std': 0.1},
{'rho': 0.05, 'lamb': 0, 'noise_std': 0.1},
{'rho': 0.05, 'lamb': 0, 'noise_std': 0.1},
{'rho': 0.05, 'lamb': 0, 'noise_std': 0.0}]
for i, layer in enumerate(net.layers):
sgd_param = sgd_params[i]
train_param = train_params[i]
# subtract completed epochs
sgd_param['n_epochs'] -= algo_epochs(layer, 'sgd')
if sgd_param['n_epochs'] > 0:
trainer = auto.SparseTrainer(layer, **train_param)
test_fn = net.propVHV_fn(maxlayer=i)
# save_fn = lambda: net.to_file(loadfile)
save_fn = None
auto.sgd(trainer, images, timages, test_fn=test_fn,
vlims=(-2,2), save_fn=save_fn,
**sgd_param)
if save_fn is None:
# net.to_file(loadfile)
layer_file = "%s.layer_%d_%s.npz" % (
loadfile, i,
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
layer.to_file(layer_file)
images = layer.compup(images)
net.to_file(loadfile)
if 1:
results = net.compVHV(patches)
rmses = imtools.rmse(patches, results)
print "rmse", rmses.mean(), rmses.std()
if imtools.display_available():
plt.figure(figsize=(11.925, 12.425))
imtools.compare([patches, results], rows=8, cols=12, vlims=(-2,2))
|
mit
|
saiwing-yeung/scikit-learn
|
sklearn/linear_model/tests/test_huber.py
|
25
|
6981
|
# Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_, huber_coef)
assert_array_almost_equal(huber.intercept_, huber_intercept)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
huber.fit(X, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber.coef_, huber_coef, 3)
assert_array_almost_equal(huber.intercept_, huber_intercept, 3)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y, sample_weight=[1, 3, 1, 2, 1])
assert_array_almost_equal(huber_sparse.coef_, huber_coef, 3)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=5, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=1000000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
# And as said above, the first iteration seems to be run anyway.
if huber_warm.n_iter_ is not None:
assert_equal(1, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that huber gives a better r2 score than ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
|
bsd-3-clause
|
skavulya/spark-tk
|
regression-tests/sparktkregtests/testcases/graph/graph_connected_test.py
|
11
|
2429
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test connected_components graphx, Valuesare checked against networkx"""
import unittest
from sparktkregtests.lib import sparktk_test
class ConnectedComponents(sparktk_test.SparkTKTestCase):
def test_connected_component(self):
""" Tests the graphx connected components in ATK"""
super(ConnectedComponents, self).setUp()
graph_data = self.get_file("clique_10.csv")
schema = [('src', str),
('dst', str)]
# set up the vertex frame, which is the union of the src and
# the dst columns of the edges
self.frame = self.context.frame.import_csv(graph_data, schema=schema)
self.vertices = self.frame.copy()
self.vertices2 = self.frame.copy()
self.vertices.rename_columns({"src": "id"})
self.vertices.drop_columns(["dst"])
self.vertices2.rename_columns({"dst": "id"})
self.vertices2.drop_columns(["src"])
self.vertices.append(self.vertices2)
self.vertices.drop_duplicates()
self.vertices.sort("id")
self.frame.add_columns(lambda x: 2, ("value", int))
self.graph = self.context.graph.create(self.vertices, self.frame)
components = self.graph.connected_components()
components.sort('id')
components.add_columns(
lambda x: x['id'].split('_')[1], ("element", str))
frame = components.to_pandas(components.count())
group = frame.groupby('component').agg(lambda x: x.nunique())
# Each component should only have 1 element value, the name of the
# component
for _, row in group.iterrows():
self.assertEqual(row['element'], 1)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/core/tools/timedeltas.py
|
2
|
6506
|
"""
timedelta support tools
"""
import warnings
import numpy as np
from pandas._libs.tslibs import NaT
from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.arrays.timedeltas import sequence_to_td64ns
@deprecate_kwarg(old_arg_name="box", new_arg_name=None)
def to_timedelta(arg, unit="ns", box=True, errors="raise"):
"""
Convert argument to timedelta.
Timedeltas are absolute differences in times, expressed in difference
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta.
unit : str, default 'ns'
Denotes the unit of the arg. Possible values:
('Y', 'M', 'W', 'D', 'days', 'day', 'hours', 'hour', 'hr',
'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds',
'sec', 'second', 'ms', 'milliseconds', 'millisecond',
'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond',
'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos',
'nanosecond', 'N').
box : bool, default True
- If True returns a Timedelta/TimedeltaIndex of the results.
- If False returns a numpy.timedelta64 or numpy.ndarray of
values of dtype timedelta64[ns].
.. deprecated:: 0.25.0
Use :meth:`Series.to_numpy` or :meth:`Timedelta.to_timedelta64`
instead to get an ndarray of values or numpy.timedelta64,
respectively.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
timedelta64 or numpy.array of timedelta64
Output type returned if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
'00:00:03', '00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
Returning an ndarray by using the 'box' keyword argument:
>>> pd.to_timedelta(np.arange(5), box=False)
array([0, 1, 2, 3, 4], dtype='timedelta64[ns]')
"""
unit = parse_timedelta_unit(unit)
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}")
if unit in {"Y", "y", "M"}:
warnings.warn(
"M and Y units are deprecated and " "will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, unit=unit, box=False, errors=errors)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name)
elif isinstance(arg, np.ndarray) and arg.ndim == 0:
# extract array scalar and process below
arg = arg.item()
elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, timedelta, list, tuple, " "1-d array, or Series"
)
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, errors=errors)
def _coerce_scalar_to_timedelta_type(r, unit="ns", box=True, errors="raise"):
"""Convert string 'r' to a timedelta object."""
try:
result = Timedelta(r, unit)
if not box:
# explicitly view as timedelta64 for case when result is pd.NaT
result = result.asm8.view("timedelta64[ns]")
except ValueError:
if errors == "raise":
raise
elif errors == "ignore":
return r
# coerce
result = NaT
return result
def _convert_listlike(arg, unit="ns", box=True, errors="raise", name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"):
# This is needed only to ensure that in the case where we end up
# returning arg (errors == "ignore"), and where the input is a
# generator, we return a useful list-like instead of a
# used-up generator
arg = np.array(list(arg), dtype=object)
try:
value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]
except ValueError:
if errors == "ignore":
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit="ns", name=name)
return value
|
apache-2.0
|
NLeSC/embodied-emotions-scripts
|
embem/machinelearning/rakel_save_clf.py
|
1
|
1561
|
"""Script to train rakel classifier (based on all data) and save classifier
object
The classifier object is saved to <output_dir>/classifier.pkl
Usage: python rakel_save_clf.py <train file> <output_dir>
"""
from __future__ import print_function
import argparse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
from rakel import RandomKLabelsets
from mlutils import get_data, split
from nltk.corpus import stopwords as sw
import string
import os
parser = argparse.ArgumentParser()
parser.add_argument('train_file', help='file containing the train data')
parser.add_argument('output_dir', help='directory to save the classifier to')
args = parser.parse_args()
stopwords = sw.words('dutch') + [p for p in string.punctuation]
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
classifier_file = '{}/classifier.pkl'.format(args.output_dir)
X_train, X_test, Y_train, Y_test, classes_ = get_data(args.train_file,
args.train_file)
clf = make_pipeline(TfidfVectorizer(analyzer=split,
stop_words=stopwords),
RandomKLabelsets(LinearSVC(class_weight='auto'),
n_estimators=Y_train.shape[1]*2,
labels_per_estimator=3))
clf.fit(X_train, Y_train)
# save classifier
joblib.dump(clf, classifier_file)
print('saved', classifier_file)
|
apache-2.0
|
LohithBlaze/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
178
|
8006
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
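# Majority vote: for every sample, count the (optionally weighted) votes per
# encoded class label with np.bincount and take the argmax as the prediction.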
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
|
bsd-3-clause
|
broadinstitute/ebola-predictor
|
lreg/train.py
|
1
|
6789
|
"""
Trains a Logistic Regression Classifier with binary output.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import argparse
import sys
import pandas as pd
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import matplotlib.pyplot as plt
def prefix():
return "lreg"
def title():
return "Logistic Regression"
def sigmoid(v):
return 1 / (1 + np.exp(-v))
def cost(theta, X, y, gamma):
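# Regularized cross-entropy cost implemented below:
#   J(theta) = mean(-y*log(h) - (1 - y)*log(1 - h)) + (gamma / (2*M)) * sum(theta[1:]**2)
# where h = sigmoid(X.dot(theta)); the intercept theta[0] is not penalized.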
M = X.shape[0]
h = sigmoid(np.dot(X, theta))
terms = -y * np.log(h) - (1-y) * np.log(1-h)
prod = theta * theta
prod[0] = 0
penalty = (gamma / (2 * M)) * np.sum(prod)
return terms.mean() + penalty
def gradient(theta, X, y, gamma):
M = X.shape[0]
N = X.shape[1]
# Note the vectorized operations using numpy:
# X is a MxN array, and theta a Nx1 array,
# so np.dot(X, theta) gives a Mx1 array, which
# in turn is used by the sigmoid function to
# perform the calculation component-wise and
# return another Mx1 array
h = sigmoid(np.dot(X, theta))
err = h - y
# err is a Mx1 array, so that its dot product
# with the MxN array X gives a Nx1 array, which
# in this case it is exactly the gradient!
costGrad = np.dot(err, X) / M
regCost = (gamma / M) * np.copy(theta)
regCost[0] = 0
grad = costGrad + regCost
global gcheck
if gcheck:
ok = True
epsilon = 1E-5
maxerr = 0.01
grad1 = np.zeros(N);
for i in range(0, N):
theta0 = np.copy(theta)
theta1 = np.copy(theta)
theta0[i] = theta0[i] - epsilon
theta1[i] = theta1[i] + epsilon
c0 = cost(theta0, X, y, gamma)
c1 = cost(theta1, X, y, gamma)
grad1[i] = (c1 - c0) / (2 * epsilon)
diff = abs(grad1[i] - grad[i])
if maxerr < diff:
print "Numerical and analytical gradients differ by",diff,"at argument",i,"/",N
ok = False
if ok:
print "Numerical and analytical gradients coincide within the given precision of",maxerr
return grad
def add_value(theta):
global params
global gamma
global values
(X, y, gamma) = params
value = cost(theta, X, y, gamma);
values = np.append(values, [value])
def optim(params, threshold):
global values
(X, y, gamma) = params
M = X.shape[0]
N = X.shape[1]
print ""
print "Running BFGS minimization..."
theta0 = 1 - 2 * np.random.rand(N)
thetaOpt = fmin_l_bfgs_b(cost, theta0, fprime=gradient, args=(X, y, gamma), pgtol=threshold, callback=add_value)[0]
return [True, thetaOpt]
def print_theta(theta, N, names):
print "{:10s} {:3.5f}".format("Intercept", theta[0])
for i in range(1, N):
print "{:10s} {:3.5f}".format(names[i-1], theta[i])
def save_theta(filename, theta, N, names):
with open(filename, "wb") as pfile:
pfile.write("Intercept " + str(theta[0]) + "\n")
for i in range(1, N):
pfile.write(names[i-1] + " " + str(theta[i]) + "\n")
"""
Trains the logistic regression classifier given the specified parameters
: param train_filename: name of file containing training set
: param param_filename: name of file to store resulting logistic regression parameters
: param kwparams: custom arguments for logistic regression: inv_reg (inverse of regularization
coefficient), threshold (default convergence threshold), show (show
minimization plot), debug (gradient check)
"""
def train(train_filename, param_filename, **kwparams):
if "inv_reg" in kwparams:
gamma = 1.0 / float(kwparams["inv_reg"])
else:
gamma = 0.08
if "threshold" in kwparams:
threshold = float(kwparams["threshold"])
else:
threshold = 1E-5
if "show" in kwparams:
show = True if kwparams["show"].lower() == "true" else False
else:
show = False
if "debug" in kwparams:
debug = True if kwparams["debug"].lower() == "true" else False
else:
debug = False
global gcheck
global params
global values
gcheck = debug
print "***************************************"
# Loading data frame and initializing dimensions
df = pd.read_csv(train_filename, delimiter=',', na_values="?")
M = df.shape[0]
N = df.shape[1]
vars = df.columns.values[1: N]
print "Number of independent variables:", N-1
print "Number of data samples :", M
y = df.values[:,0]
# Building the (normalized) design matrix
X = np.ones((M, N))
for j in range(1, N):
# Computing the j-th column. The pandas dataframe
# contains all the values as numpy arrays that
# can be handled individually:
values = df.values[:, j]
minv = values.min()
maxv = values.max()
if maxv > minv:
X[:, j] = np.clip((values - minv) / (maxv - minv), 0, 1)
else:
X[:, j] = 1.0 / M
values = np.array([])
params = (X, y, gamma)
[conv, theta] = optim(params, threshold)
if conv:
print "Convergence!"
else:
print "Error: cost function increased..."
print "Try adjusting the learning or the regularization coefficients"
if show:
plt.plot(np.arange(values.shape[0]), values)
plt.xlabel("Step number")
plt.ylabel("Cost function")
plt.show()
print ""
print "Logistic Regresion parameters:"
print_theta(theta, N, vars)
save_theta(param_filename, theta, N, vars)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--train", nargs=1, default=["./models/test/training-data-completed.csv"],
help="File containing training set")
parser.add_argument("-p", "--param", nargs=1, default=["./models/test/lreg-params"],
help="Output file to save the parameters of the neural net")
parser.add_argument("-r", "--inv_reg", nargs=1, type=float, default=[12.5],
help="Inverse of regularization coefficient, larger values represent lower penalty")
parser.add_argument("-c", "--convergence", nargs=1, type=float, default=[1E-5],
help="Convergence threshold for the BFGS minimizer")
parser.add_argument("-s", "--show", action="store_true",
help="Shows minimization plot")
parser.add_argument("-d", "--debug", action="store_true",
help="Debugs gradient calculation")
args = parser.parse_args()
train(args.train[0], args.param[0],
inv_reg=str(args.inv_reg[0]),
threshold=str(args.convergence[0]),
show=str(args.show),
debug=str(args.debug))
|
bsd-2-clause
|
yunfeilu/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
323
|
1602
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
bsd-3-clause
|
shoyer/xarray
|
xarray/core/combine.py
|
1
|
38182
|
import itertools
import warnings
from collections import Counter
from textwrap import dedent
import pandas as pd
from . import dtypes
from .concat import concat
from .dataarray import DataArray
from .dataset import Dataset
from .merge import merge
def _infer_concat_order_from_positions(datasets):
combined_ids = dict(_infer_tile_ids_from_nested_list(datasets, ()))
return combined_ids
def _infer_tile_ids_from_nested_list(entry, current_pos):
"""
Given a list of lists (of lists...) of objects, returns an iterator
which yields a tuple containing the index of each object in the nested
list structure as the key, and the object. This can then be called by the
dict constructor to create a dictionary of the objects organised by their
position in the original nested list.
Recursively traverses the given structure, while keeping track of the
current position. Should work for any type of object which isn't a list.
Parameters
----------
entry : list[list[obj, obj, ...], ...]
List of lists of arbitrary depth, containing objects in the order
they are to be concatenated.
Returns
-------
combined_tile_ids : dict[tuple(int, ...), obj]
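Examples
--------
A small sketch with placeholder objects (strings stand in for datasets):
>>> dict(_infer_tile_ids_from_nested_list([["a", "b"], ["c", "d"]], ()))
{(0, 0): 'a', (0, 1): 'b', (1, 0): 'c', (1, 1): 'd'}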
"""
if isinstance(entry, list):
for i, item in enumerate(entry):
yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,))
else:
yield current_pos, entry
def _infer_concat_order_from_coords(datasets):
concat_dims = []
tile_ids = [() for ds in datasets]
# All datasets have same variables because they've been grouped as such
ds0 = datasets[0]
for dim in ds0.dims:
# Check if dim is a coordinate dimension
if dim in ds0:
# Need to read coordinate values to do ordering
indexes = [ds.indexes.get(dim) for ds in datasets]
if any(index is None for index in indexes):
raise ValueError(
"Every dimension needs a coordinate for "
"inferring concatenation order"
)
# If dimension coordinate values are same on every dataset then
# should be leaving this dimension alone (it's just a "bystander")
if not all(index.equals(indexes[0]) for index in indexes[1:]):
# Infer order datasets should be arranged in along this dim
concat_dims.append(dim)
if all(index.is_monotonic_increasing for index in indexes):
ascending = True
elif all(index.is_monotonic_decreasing for index in indexes):
ascending = False
else:
raise ValueError(
"Coordinate variable {} is neither "
"monotonically increasing nor "
"monotonically decreasing on all datasets".format(dim)
)
# Assume that any two datasets whose coord along dim starts
# with the same value have the same coord values throughout.
if any(index.size == 0 for index in indexes):
raise ValueError("Cannot handle size zero dimensions")
first_items = pd.Index([index[0] for index in indexes])
# Sort datasets along dim
# We want rank but with identical elements given identical
# position indices - they should be concatenated along another
# dimension, not along this one
series = first_items.to_series()
rank = series.rank(method="dense", ascending=ascending)
order = rank.astype(int).values - 1
# Append positions along extra dimension to structure which
# encodes the multi-dimensional concatenation order
tile_ids = [
tile_id + (position,) for tile_id, position in zip(tile_ids, order)
]
if len(datasets) > 1 and not concat_dims:
raise ValueError(
"Could not find any dimension coordinates to use to "
"order the datasets for concatenation"
)
combined_ids = dict(zip(tile_ids, datasets))
return combined_ids, concat_dims
def _check_dimension_depth_tile_ids(combined_tile_ids):
"""
Check all tuples are the same length, i.e. check that all lists are
nested to the same depth.
"""
tile_ids = combined_tile_ids.keys()
nesting_depths = [len(tile_id) for tile_id in tile_ids]
if not nesting_depths:
nesting_depths = [0]
if not set(nesting_depths) == {nesting_depths[0]}:
raise ValueError(
"The supplied objects do not form a hypercube because"
" sub-lists do not have consistent depths"
)
# return these just to be reused in _check_shape_tile_ids
return tile_ids, nesting_depths
def _check_shape_tile_ids(combined_tile_ids):
"""Check all lists along one dimension are same length."""
tile_ids, nesting_depths = _check_dimension_depth_tile_ids(combined_tile_ids)
for dim in range(nesting_depths[0]):
indices_along_dim = [tile_id[dim] for tile_id in tile_ids]
occurrences = Counter(indices_along_dim)
if len(set(occurrences.values())) != 1:
raise ValueError(
"The supplied objects do not form a hypercube "
"because sub-lists do not have consistent "
"lengths along dimension" + str(dim)
)
def _combine_nd(
combined_ids,
concat_dims,
data_vars="all",
coords="different",
compat="no_conflicts",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Combines an N-dimensional structure of datasets into one by applying a
series of either concat and merge operations along each dimension.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match the length of the tuples used as
keys in combined_ids. If the string is a dimension name then concat
along that dimension, if it is None then merge.
Returns
-------
combined_ds : xarray.Dataset
"""
example_tile_id = next(iter(combined_ids.keys()))
n_dims = len(example_tile_id)
if len(concat_dims) != n_dims:
raise ValueError(
"concat_dims has length {} but the datasets "
"passed are nested in a {}-dimensional structure".format(
len(concat_dims), n_dims
)
)
# Each iteration of this loop reduces the length of the tile_ids tuples
# by one. It always combines along the first dimension, removing the first
# element of the tuple
for concat_dim in concat_dims:
combined_ids = _combine_all_along_first_dim(
combined_ids,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
(combined_ds,) = combined_ids.values()
return combined_ds
def _combine_all_along_first_dim(
combined_ids,
dim,
data_vars,
coords,
compat,
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
# Group into lines of datasets which must be combined along dim
# need to sort by _new_tile_id first for groupby to work
# TODO: is the sort needed?
combined_ids = dict(sorted(combined_ids.items(), key=_new_tile_id))
grouped = itertools.groupby(combined_ids.items(), key=_new_tile_id)
# Combine all of these datasets along dim
new_combined_ids = {}
for new_id, group in grouped:
combined_ids = dict(sorted(group))
datasets = combined_ids.values()
new_combined_ids[new_id] = _combine_1d(
datasets, dim, compat, data_vars, coords, fill_value, join, combine_attrs
)
return new_combined_ids
def _combine_1d(
datasets,
concat_dim,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Applies either concat or merge to 1D list of datasets depending on value
of concat_dim
"""
if concat_dim is not None:
try:
combined = concat(
datasets,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
except ValueError as err:
if "encountered unexpected variable" in str(err):
raise ValueError(
"These objects cannot be combined using only "
"xarray.combine_nested, instead either use "
"xarray.combine_by_coords, or do it manually "
"with xarray.concat, xarray.merge and "
"xarray.align"
)
else:
raise
else:
combined = merge(
datasets,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
return combined
def _new_tile_id(single_id_ds_pair):
tile_id, ds = single_id_ds_pair
return tile_id[1:]
def _nested_combine(
datasets,
concat_dims,
compat,
data_vars,
coords,
ids,
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
if len(datasets) == 0:
return Dataset()
# Arrange datasets for concatenation
# Use information from the shape of the user input
if not ids:
# Determine tile_IDs by structure of input in N-D
# (i.e. ordering in list-of-lists)
combined_ids = _infer_concat_order_from_positions(datasets)
else:
# Already sorted so just use the ids already passed
combined_ids = dict(zip(ids, datasets))
# Check that the inferred shape is combinable
_check_shape_tile_ids(combined_ids)
# Apply series of concatenate or merge operations along each dimension
combined = _combine_nd(
combined_ids,
concat_dims,
compat=compat,
data_vars=data_vars,
coords=coords,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
return combined
def combine_nested(
datasets,
concat_dim,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Explicitly combine an N-dimensional grid of datasets into one by using a
succession of concat and merge operations along each dimension of the grid.
Does not sort the supplied datasets under any circumstances, so the
datasets must be passed in the order you wish them to be concatenated. It
does align coordinates, but different variables on datasets can cause it to
fail under some scenarios. In complex cases, you may need to clean up your
data and use concat/merge explicitly.
To concatenate along multiple dimensions the datasets must be passed as a
nested list-of-lists, with a depth equal to the length of ``concat_dims``.
``combine_nested`` will concatenate along the top-level list first.
Useful for combining datasets from a set of nested directories, or for
collecting the output of a simulation parallelized along multiple
dimensions.
Parameters
----------
datasets : list or nested list of xarray.Dataset objects.
Dataset objects to combine.
If concatenation or merging along more than one dimension is desired,
then datasets must be supplied in a nested list-of-lists.
concat_dim : str, or list of str, DataArray, Index or None
Dimensions along which to concatenate variables, as used by
:py:func:`xarray.concat`.
Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
and merge instead along a particular dimension.
The position of ``None`` in the list specifies the dimension of the
nested-list input along which to merge.
Must be the same length as the depth of the list passed to
``datasets``.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts', 'override'}, optional
String indicating how to compare variables of the same name for
potential merge conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- 'override': skip comparing and pick variable from first dataset
data_vars : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
coords : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
fill_value : scalar, optional
Value to use for newly missing values
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'},
default 'drop'
String indicating how to combine attrs of the objects being merged:
- 'drop': empty attrs on returned Dataset.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have
the same name must also have the same value.
- 'override': skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
combined : xarray.Dataset
Examples
--------
A common task is collecting data from a parallelized simulation in which
each process wrote out to a separate file. A domain which was decomposed
into 4 parts, 2 each along both the x and y axes, requires organising the
datasets into a doubly-nested list, e.g:
>>> x1y1
<xarray.Dataset>
Dimensions: (x: 2, y: 2)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 11.04 23.57 20.77 ...
precipitation (x, y) float64 5.904 2.453 3.404 ...
>>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
<xarray.Dataset>
Dimensions: (x: 4, y: 4)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 11.04 23.57 20.77 ...
precipitation (x, y) float64 5.904 2.453 3.404 ...
    ``combine_nested`` can also be used to explicitly merge datasets with
different variables. For example if we have 4 datasets, which are divided
along two times, and contain two different variables, we can pass ``None``
to ``concat_dim`` to specify the dimension of the nested list over which
we wish to use ``merge`` instead of ``concat``:
>>> t1temp
<xarray.Dataset>
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 11.04 23.57 20.77 ...
>>> t1precip
<xarray.Dataset>
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
precipitation (t) float64 5.904 2.453 3.404 ...
>>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
<xarray.Dataset>
Dimensions: (t: 10)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 11.04 23.57 20.77 ...
precipitation (t) float64 5.904 2.453 3.404 ...
See also
--------
concat
merge
auto_combine
"""
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim]
    # The IDs argument tells _nested_combine that datasets aren't yet sorted
return _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=False,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
def vars_as_keys(ds):
return tuple(sorted(ds))
def combine_by_coords(
datasets,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="no_conflicts",
):
"""
Attempt to auto-magically combine the given datasets into one by using
dimension coordinates.
This method attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
    your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful for if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to combine.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- 'override': skip comparing and pick variable from first dataset
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, `data_vars` must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
As per the 'data_vars' kwarg, but for coordinate variables.
fill_value : scalar, optional
Value to use for newly missing values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'},
        default 'no_conflicts'
String indicating how to combine attrs of the objects being merged:
- 'drop': empty attrs on returned Dataset.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have
the same name must also have the same value.
- 'override': skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> import numpy as np
>>> import xarray as xr
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 1.654 10.63 7.015 2.543 13.93 9.436
precipitation (y, x) float64 0.2136 0.9974 0.7603 0.4679 0.3115 0.945
>>> x2
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* y (y) int64 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 9.341 0.1251 6.269 7.709 8.82 2.316
precipitation (y, x) float64 0.1728 0.1178 0.03018 0.6509 0.06938 0.3792
>>> x3
<xarray.Dataset>
Dimensions: (x: 3, y: 2)
Coordinates:
* y (y) int64 2 3
* x (x) int64 40 50 60
Data variables:
temperature (y, x) float64 2.789 2.446 6.551 12.46 2.22 15.96
precipitation (y, x) float64 0.4804 0.1902 0.2457 0.6125 0.4654 0.5953
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset>
Dimensions: (x: 3, y: 4)
Coordinates:
* x (x) int64 10 20 30
* y (y) int64 0 1 2 3
Data variables:
temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 7.709 8.82 2.316
precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6509 0.06938 0.3792
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset>
Dimensions: (x: 6, y: 4)
Coordinates:
* x (x) int64 10 20 30 40 50 60
* y (y) int64 0 1 2 3
Data variables:
temperature (y, x) float64 1.654 10.63 7.015 nan ... nan 12.46 2.22 15.96
precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset>
Dimensions: (x: 3, y: 4)
Coordinates:
* x (x) int64 10 20 30
* y (y) int64 0 1 2 3
Data variables:
temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 12.46 2.22 15.96
precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset>
Dimensions: (x: 6, y: 4)
Coordinates:
* x (x) int64 10 20 30 40 50 60
* y (y) int64 0 1 2 3
Data variables:
temperature (y, x) float64 1.654 10.63 7.015 nan ... 12.46 2.22 15.96
precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953
"""
# Group by data vars
sorted_datasets = sorted(datasets, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Perform the multidimensional combine on each group of data variables
# before merging back together
concatenated_grouped_by_data_vars = []
for vars, datasets_with_same_vars in grouped_by_vars:
combined_ids, concat_dims = _infer_concat_order_from_coords(
list(datasets_with_same_vars)
)
if fill_value is None:
# check that datasets form complete hypercube
_check_shape_tile_ids(combined_ids)
else:
# check only that all datasets have same dimension depth for these
# vars
_check_dimension_depth_tile_ids(combined_ids)
# Concatenate along all of concat_dims one by one to create single ds
concatenated = _combine_nd(
combined_ids,
concat_dims=concat_dims,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
# Check the overall coordinates are monotonically increasing
for dim in concat_dims:
indexes = concatenated.indexes.get(dim)
if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing):
raise ValueError(
"Resulting object does not have monotonic"
" global indexes along dimension {}".format(dim)
)
concatenated_grouped_by_data_vars.append(concatenated)
return merge(
concatenated_grouped_by_data_vars,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
# Everything beyond here is only needed until the deprecation cycle in #2616
# is completed
_CONCAT_DIM_DEFAULT = "__infer_concat_dim__"
def auto_combine(
datasets,
concat_dim="_not_supplied",
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
from_openmfds=False,
):
"""
Attempt to auto-magically combine the given datasets into one.
This entire function is deprecated in favour of ``combine_nested`` and
``combine_by_coords``.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or sort data under
any circumstances. It does align coordinates, but different variables on
datasets can cause it to fail under some scenarios. In complex cases, you
may need to clean up your data and use ``concat``/``merge`` explicitly.
``auto_combine`` works well if you have N years of data and M data
variables, and each combination of a distinct time period and set of data
    variables is saved as its own dataset.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if
the dimension along which you want to concatenate is not a dimension
in the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts', 'override'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- 'override': skip comparing and pick variable from first dataset
data_vars : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
    coords : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
fill_value : scalar, optional
Value to use for newly missing values
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
Dataset.merge
"""
if not from_openmfds:
basic_msg = dedent(
"""\
In xarray version 0.15 `auto_combine` will be deprecated. See
http://xarray.pydata.org/en/stable/combining.html#combining-multi"""
)
warnings.warn(basic_msg, FutureWarning, stacklevel=2)
if concat_dim == "_not_supplied":
concat_dim = _CONCAT_DIM_DEFAULT
message = ""
else:
message = dedent(
"""\
Also `open_mfdataset` will no longer accept a `concat_dim` argument.
To get equivalent behaviour from now on please use the new
`combine_nested` function instead (or the `combine='nested'` option to
`open_mfdataset`)."""
)
if _dimension_coords_exist(datasets):
message += dedent(
"""\
The datasets supplied have global dimension coordinates. You may want
to use the new `combine_by_coords` function (or the
`combine='by_coords'` option to `open_mfdataset`) to order the datasets
before concatenation. Alternatively, to continue concatenating based
on the order the datasets are supplied in future, please use the new
`combine_nested` function (or the `combine='nested'` option to
open_mfdataset)."""
)
else:
message += dedent(
"""\
The datasets supplied do not have global dimension coordinates. In
future, to continue concatenating without supplying dimension
coordinates, please use the new `combine_nested` function (or the
            `combine='nested'` option to open_mfdataset)."""
)
if _requires_concat_and_merge(datasets):
        manual_dims = [concat_dim, None]
message += dedent(
"""\
The datasets supplied require both concatenation and merging. From
            xarray version 0.15 this operation will require either using the
new `combine_nested` function (or the `combine='nested'` option to
open_mfdataset), with a nested list structure such that you can combine
along the dimensions {}. Alternatively if your datasets have global
dimension coordinates then you can use the new `combine_by_coords`
function.""".format(
manual_dims
)
)
warnings.warn(message, FutureWarning, stacklevel=2)
return _old_auto_combine(
datasets,
concat_dim=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
fill_value=fill_value,
join=join,
)
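# A minimal migration sketch for the deprecation above: the two replacement
# entry points that the warning messages point to, applied to two tiny
# in-memory datasets. The helper name and the toy data are illustrative only
# and are not part of the public API.
def _auto_combine_migration_sketch():
    import numpy as np
    first = Dataset({"a": ("t", np.arange(3))}, coords={"t": [0, 1, 2]})
    second = Dataset({"a": ("t", np.arange(3))}, coords={"t": [3, 4, 5]})
    # combine_by_coords orders by the 't' coordinate values, so the order of
    # the input list does not matter.
    by_coords = combine_by_coords([second, first])
    # combine_nested concatenates in the order given; the caller is
    # responsible for passing the datasets in the desired order.
    nested = combine_nested([first, second], concat_dim="t")
    return by_coords, nested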
def _dimension_coords_exist(datasets):
"""
Check if the datasets have consistent global dimension coordinates
which would in future be used by `auto_combine` for concatenation ordering.
"""
# Group by data vars
sorted_datasets = sorted(datasets, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Simulates performing the multidimensional combine on each group of data
# variables before merging back together
try:
for vars, datasets_with_same_vars in grouped_by_vars:
_infer_concat_order_from_coords(list(datasets_with_same_vars))
return True
except ValueError:
# ValueError means datasets don't have global dimension coordinates
# Or something else went wrong in trying to determine them
return False
def _requires_concat_and_merge(datasets):
"""
Check if the datasets require the use of both xarray.concat and
xarray.merge, which in future might require the user to use
    `combine_nested` instead.
"""
# Group by data vars
sorted_datasets = sorted(datasets, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
return len(list(grouped_by_vars)) > 1
def _old_auto_combine(
datasets,
concat_dim=_CONCAT_DIM_DEFAULT,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
):
if concat_dim is not None:
dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim
sorted_datasets = sorted(datasets, key=vars_as_keys)
grouped = itertools.groupby(sorted_datasets, key=vars_as_keys)
concatenated = [
_auto_concat(
list(datasets),
dim=dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
)
for vars, datasets in grouped
]
else:
concatenated = datasets
merged = merge(concatenated, compat=compat, fill_value=fill_value, join=join)
return merged
def _auto_concat(
datasets,
dim=None,
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
compat="no_conflicts",
):
if len(datasets) == 1 and dim is None:
# There is nothing more to combine, so kick out early.
return datasets[0]
else:
if dim is None:
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = {i for i, _ in dim_tuples}
if len(concat_dims) > 1:
concat_dims = {d for d in concat_dims if not ds0[d].equals(ds1[d])}
if len(concat_dims) > 1:
raise ValueError(
"too many different dimensions to " "concatenate: %s" % concat_dims
)
elif len(concat_dims) == 0:
raise ValueError(
"cannot infer dimension to concatenate: "
"supply the ``concat_dim`` argument "
"explicitly"
)
(dim,) = concat_dims
return concat(
datasets,
dim=dim,
data_vars=data_vars,
coords=coords,
fill_value=fill_value,
compat=compat,
)
|
apache-2.0
|
isb-cgc/ISB-CGC-data-proc
|
gdc/etl/isoform_expression_quantification.py
|
1
|
2220
|
'''
Created on Oct 13, 2016
Copyright 2016, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
import pandas as pd
from gdc.etl import etl
class Isoform_expression_quantification(etl.Etl):
def __init__(self, config):
'''
Constructor
columns in the GDC file:
miRNA_ID isoform_coords read_count reads_per_million_miRNA_mapped cross-mapped miRNA_region
columns for the BQ table:
sample_barcode
miRNA_ID
read_count
reads_per_million_miRNA_mapped
genomic_build (from isoform_coords)
chromosome (from isoform_coords)
start (from isoform_coords)
end (from isoform_coords)
strand (from isoform_coords)
cross_mapped
mirna_accession (from miRNA_region)
mirna_transcript (from miRNA_region)
project_short_name
program_name
sample_type_code
file_name
file_gdc_id
aliquot_barcode
case_barcode
case_gdc_id
sample_gdc_id
aliquot_gdc_id
'''
pass
def data_type_specific(self, config, file_df):
# combine the two versions of the transcript fields (one with an accession, the other without)
file_df['mirna_transcript'] = file_df[['mirna_transcript', 'mirna_trans']].apply(lambda x: x[0] if pd.isnull(x[1]) else x[1], axis=1)
def skip_file(self, config, data_type, path, program_name, file2info, info, log):
if 'miRNA isoform quantification' == info['data_type'] and -1 == info['file_name'].find('hg19.mirbase20'):
return True
return False
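# A small, self-contained sketch of the column merge performed in
# ``data_type_specific`` above: take the value from 'mirna_trans' when it is
# present, otherwise fall back to 'mirna_transcript'. The values below are
# made up; ``Series.combine_first`` gives the same result as the row-wise
# apply used in the method.
def _transcript_merge_sketch():
    df = pd.DataFrame({
        'mirna_transcript': ['hsa-mir-21', 'hsa-mir-155', None],
        'mirna_trans': [None, 'hsa-mir-155,MIMAT0000646', 'hsa-mir-10b']})
    # Prefer the second column, fall back to the first where it is null.
    df['mirna_transcript'] = df['mirna_trans'].combine_first(
        df['mirna_transcript'])
    return df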
|
apache-2.0
|
wronk/mne-python
|
mne/cov.py
|
1
|
75382
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
from distutils.version import LooseVersion
import itertools as itt
from math import log
import os
import numpy as np
from scipy import linalg
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_needs_eeg_average_ref_proj)
from .io import fiff_open
from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info,
_picks_by_type, _pick_data_channels)
from .io.constants import FIFF
from .io.meas_info import read_bad_channels
from .io.proj import _read_proj, _write_proj
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import Epochs
from .event import make_fixed_length_events
from .utils import (check_fname, logger, verbose, estimate_rank,
_compute_row_norms, check_version, _time_mask, warn,
_check_copy_dep)
from .fixes import in1d
from .externals.six.moves import zip
from .externals.six import string_types
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
        raise ValueError('Both Covariance objects must have the same list '
                         'of channels.')
projs1 = [str(c) for c in cov1['projs']]
    projs2 = [str(c) for c in cov2['projs']]
if projs1 != projs2:
        raise ValueError('Both Covariance objects must have the same list '
                         'of SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""get the slice."""
mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq'])
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of string
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None):
"""Init of covariance."""
diag = True if data.ndim == 1 else False
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception as inst:
fid.close()
os.remove(fname)
raise inst
end_file(fid)
def copy(self):
"""Copy the Covariance object
Returns
-------
cov : instance of Covariance
The copied object.
"""
return cp.deepcopy(self)
def as_diag(self, copy=None):
"""Set covariance to be processed as being diagonal.
Parameters
----------
copy : bool
This parameter has been deprecated and will be removed in 0.13.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
"""
cov = _check_copy_dep(self, copy, default=True)
if cov['diag']:
return cov
cov['diag'] = True
cov['data'] = np.diag(cov['data'])
cov['eig'] = None
cov['eigvec'] = None
return cov
def __repr__(self):
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cp.deepcopy(cov)
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
        info : dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
            type. We show square roots, i.e. standard deviations.
show : bool
Call pyplot.show() as the end or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
from .viz.misc import plot_cov
return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
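# A short numeric sketch of how ``Covariance.__add__`` pools two estimates:
# the data matrices are averaged with weights given by their degrees of
# freedom, i.e. (C1 * n1 + C2 * n2) / (n1 + n2), and the degrees of freedom
# add up. The channel names and 2 x 2 matrices below are made up purely for
# illustration.
def _covariance_addition_sketch():
    c1 = Covariance(np.eye(2), ['EEG 001', 'EEG 002'], bads=[], projs=[],
                    nfree=100)
    c2 = Covariance(2 * np.eye(2), ['EEG 001', 'EEG 002'], bads=[], projs=[],
                    nfree=300)
    pooled = c1 + c2
    expected = (1.0 * 100 + 2.0 * 300) / 400.  # 1.75 on the diagonal
    assert np.allclose(pooled.data, expected * np.eye(2))
    assert pooled.nfree == 400
    return pooled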
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : string
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
with f as fid:
return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
limited=True))
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of Info
Measurement info.
verbose : bool, str, int, or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
.. versionadded:: 0.9.0
"""
info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=[]))
info._check_consistency()
# Standard deviations to be used
grad_std = 5e-13
mag_std = 20e-15
eeg_std = 0.2e-6
logger.info('Using standard noise values '
'(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
% (1e13 * grad_std, 1e15 * mag_std, 1e6 * eeg_std))
data = np.zeros(len(info['ch_names']))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(grad_std, mag_std, eeg_std)):
data[pick_types(info, meg=meg, eeg=eeg)] = val * val
return Covariance(data, info['ch_names'], info['bads'], info['projs'],
nfree=0)
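# A minimal usage sketch for ``make_ad_hoc_cov``: build a tiny Info with a
# few EEG channels and check that the resulting diagonal covariance carries
# the squared 0.2 uV standard deviation. The channel names and sampling rate
# are arbitrary, and ``mne.create_info`` is assumed to accept this call form.
def _ad_hoc_cov_sketch():
    from mne import create_info
    info = create_info(['EEG 001', 'EEG 002', 'EEG 003'], sfreq=1000.,
                       ch_types='eeg')
    cov = make_ad_hoc_cov(info)
    assert np.allclose(cov.data, (0.2e-6) ** 2)  # diagonal values only
    return cov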
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
warn('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
@verbose
def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None,
flat=None, picks=None, method='empirical',
method_params=None, cv=3, scalings=None, n_jobs=1,
return_estimators=False, verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance from empty room
data or time intervals before starting the stimulation.
.. note:: This function will:
1. Partition the data into evenly spaced, equal-length
epochs.
2. Load them into memory.
3. Subtract the mean across all time points and epochs
for each channel.
4. Process the :class:`Epochs` by
:func:`compute_covariance`.
This will produce a slightly different result compared to
using :func:`make_fixed_length_events`, :class:`Epochs`, and
:func:`compute_covariance` directly, since that would (with
the recommended baseline correction) subtract the mean across
time *for each epoch* (instead of across epochs) for each
channel.
Parameters
----------
raw : instance of Raw
Raw data
tmin : float
Beginning of time interval in seconds. Defaults to 0.
tmax : float | None (default None)
End of time interval in seconds. If None (default), use the end of the
recording.
tstep : float (default 0.2)
Length of data chunks for artefact rejection in seconds.
Can also be None to use a single epoch of (tmax - tmin)
duration. This can use a lot of memory for large ``Raw``
instances.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
picks : array-like of int | None (default None)
Indices of channels to include (if None, data channels are used).
method : str | list | None (default 'empirical')
The method used for covariance estimation.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
method_params : dict | None (default None)
Additional parameters to the estimation procedure.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
.. versionadded:: 0.12
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
.. versionadded:: 0.12
n_jobs : int (default 1)
Number of jobs to run in parallel.
.. versionadded:: 0.12
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
.. versionadded:: 0.12
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_covariance : Estimate noise covariance matrix from epochs
"""
tmin = 0. if tmin is None else float(tmin)
tmax = raw.times[-1] if tmax is None else float(tmax)
tstep = tmax - tmin if tstep is None else float(tstep)
tstep_m1 = tstep - 1. / raw.info['sfreq'] # inclusive!
events = make_fixed_length_events(raw, 1, tmin, tmax, tstep)
pl = 's' if len(events) != 1 else ''
logger.info('Using up to %s segment%s' % (len(events), pl))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
# Need to include all channels e.g. if eog rejection is to be used
picks = np.arange(raw.info['nchan'])
pick_mask = in1d(
picks, _pick_data_channels(raw.info, with_ref_meg=False))
else:
pick_mask = slice(None)
epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None,
picks=picks, reject=reject, flat=flat, verbose=False,
preload=False, proj=False)
if isinstance(method, string_types) and method == 'empirical':
# potentially *much* more memory efficient to do it the iterative way
picks = picks[pick_mask]
data = 0
n_samples = 0
mu = 0
# Read data in chunks
for raw_segment in epochs:
raw_segment = raw_segment[pick_mask]
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
_check_n_samples(n_samples, len(picks))
mu /= n_samples
data -= n_samples * mu[:, None] * mu[None, :]
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
ch_names = [raw.info['ch_names'][k] for k in picks]
bads = [b for b in raw.info['bads'] if b in ch_names]
projs = cp.deepcopy(raw.info['projs'])
return Covariance(data, ch_names, bads, projs, nfree=n_samples)
del picks, pick_mask
# This makes it equivalent to what we used to do (and do above for
# empirical mode), treating all epochs as if they were a single long one
epochs.load_data()
ch_means = epochs._data.mean(axis=0).mean(axis=1)
epochs._data -= ch_means[np.newaxis, :, np.newaxis]
# fake this value so there are no complaints from compute_covariance
epochs.baseline = (None, None)
return compute_covariance(epochs, keep_sample_mean=True, method=method,
method_params=method_params, cv=cv,
scalings=scalings, n_jobs=n_jobs,
return_estimators=return_estimators)
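# A compact check of the streaming update used in the 'empirical' branch of
# ``compute_raw_covariance``: accumulating the outer products and channel
# sums over chunks, then removing n * mu * mu.T and dividing by (n - 1),
# reproduces ``np.cov`` on the concatenated data. Random data, purely
# illustrative.
def _streaming_covariance_sketch(n_channels=4, n_chunks=5, chunk_len=100):
    rng = np.random.RandomState(0)
    chunks = [rng.randn(n_channels, chunk_len) for _ in range(n_chunks)]
    data, mu, n_samples = 0., 0., 0
    for seg in chunks:
        mu += seg.sum(axis=1)
        data += np.dot(seg, seg.T)
        n_samples += seg.shape[1]
    mu /= n_samples
    data -= n_samples * mu[:, None] * mu[None, :]
    data /= (n_samples - 1.)
    assert np.allclose(data, np.cov(np.concatenate(chunks, axis=1)))
    return data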
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stim periods
when the stim onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined:
1. either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
2. an Epochs object is created for multiple events and passed
to this function.
.. note:: Baseline correction should be used when creating the Epochs.
Otherwise the computed covariance matrix will be inaccurate.
.. note:: For multiple event types, it is also possible to create a
single Epochs object with events obtained using
merge_events(). However, the resulting covariance matrix
will only be correct if keep_sample_mean is True.
.. note:: The covariance can be unstable if the number of samples is
not sufficient. In that case it is common to regularize a
covariance estimate. The ``method`` parameter of this
function allows to regularize the covariance in an
automated way. It also allows to select between different
alternative estimation algorithms which themselves achieve
regularization. Details are described in [1]_.
Parameters
----------
epochs : instance of Epochs, or a list of Epochs objects
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to run a
set of the different methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in [1]_. Valid methods are:
* ``'empirical'``: the empirical or sample covariance
* ``'diagonal_fixed'``: a diagonal regularization as in
mne.cov.regularize (see MNE manual)
* ``'ledoit_wolf'``: the Ledoit-Wolf estimator [2]_
* ``'shrunk'``: like 'ledoit_wolf' with cross-validation for
optimal alpha (see scikit-learn documentation on covariance
estimation)
* ``'pca'``: probabilistic PCA with low rank [3]_
* ``'factor_analysis'``: Factor Analysis with low rank [4]_
If ``'auto'``, this expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
.. note:: ``'ledoit_wolf'`` and ``'pca'`` are similar to
``'shrunk'`` and ``'factor_analysis'``, respectively. They are not
included to avoid redundancy. In most cases ``'shrunk'`` and
``'factor_analysis'`` represent more appropriate default
choices.
The ``'auto'`` mode is not recommended if there are many
segments of data, since computation can take a long time.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of `method`.
If None (default), expands to::
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False,
'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
n_jobs : int (default 1)
Number of jobs to run in parallel.
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
    verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG
signals, vol. 108, 328-342, NeuroImage.
.. [2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
large-dimensional covariance matrices. Journal of Multivariate
Analysis 88 (2), 365 - 411.
.. [3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
component analysis. Journal of the Royal Statistical Society:
Series B (Statistical Methodology) 61 (3), 611 - 622.
.. [4] Barber, D., (2012). Bayesian reasoning and machine learning.
Cambridge University Press., Algorithm 21.1
"""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'shrunk', 'pca', 'factor_analysis',)
msg = ('Invalid method ({method}). Accepted values (individually or '
'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
# scale to natural unit for best stability with MEG/EEG
if isinstance(scalings, dict):
for k, v in scalings.items():
if k not in ('mag', 'grad', 'eeg'):
raise ValueError('The keys in `scalings` must be "mag" or'
'"grad" or "eeg". You gave me: %s' % k)
scalings = _handle_default('scalings', scalings)
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
for epochs_t in epochs:
if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and \
keep_sample_mean:
warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
for epoch in epochs:
epoch.info._check_consistency()
bads = epochs[0].info['bads']
if projs is None:
projs = cp.deepcopy(epochs[0].info['projs'])
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
else:
projs = cp.deepcopy(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if method == 'auto':
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
ok_sklearn = check_version('sklearn', '0.15') is True
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`')
if keep_sample_mean is False:
if len(method) != 1 or 'empirical' not in method:
raise ValueError('`keep_sample_mean=False` is only supported'
                             ' with `method="empirical"`')
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = [0] * n_epoch_types
n_samples = np.zeros(n_epoch_types, dtype=np.int)
n_epochs = np.zeros(n_epoch_types, dtype=np.int)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
if not all(k in accepted_methods for k in method):
raise ValueError(msg.format(method=method))
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
if ok_sklearn:
cov_data = _compute_covariance_auto(epochs, method=method,
method_params=_method_params,
info=info,
verbose=verbose,
cv=cv,
n_jobs=n_jobs,
# XXX expose later
stop_early=True, # if needed.
picks_list=picks_list,
scalings=scalings)
else:
if _method_params['empirical']['assume_centered'] is True:
cov = epochs.T.dot(epochs) / n_samples_tot
else:
cov = np.cov(epochs.T, bias=1)
cov_data = {'empirical': {'data': cov}}
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= n_samples_tot
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot)
logger.info('Number of samples used : %d' % n_samples_tot)
logger.info('[done]')
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
if ok_sklearn:
msg = ['log-likelihood on unseen data (descending order):']
logliks = [(c['method'], c['loglik']) for c in covs]
logliks.sort(reverse=True, key=lambda c: c[1])
for k, v in logliks:
msg.append('%s: %0.3f' % (k, v))
logger.info('\n '.join(msg))
if ok_sklearn and not return_estimators:
keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
out = covs[np.argmax(scores)]
logger.info('selecting best estimator: {0}'.format(out['method']))
elif ok_sklearn:
out = covs
out.sort(key=lambda c: c['loglik'], reverse=True)
else:
out = covs[0]
return out
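# A minimal usage sketch for ``compute_covariance`` on simulated epochs:
# random EEG-like data wrapped in an ``EpochsArray``. The channel names,
# sampling rate and event codes are arbitrary, and ``EpochsArray`` and
# ``create_info`` are assumed to accept these call forms.
def _compute_covariance_sketch(n_epochs=30, n_channels=5, n_times=50):
    from mne import create_info, EpochsArray
    rng = np.random.RandomState(42)
    data = rng.randn(n_epochs, n_channels, n_times) * 1e-6  # volts
    info = create_info(['EEG %03d' % k for k in range(n_channels)],
                       sfreq=100., ch_types='eeg')
    events = np.c_[(np.arange(n_epochs) + 1) * n_times,
                   np.zeros(n_epochs, int), np.ones(n_epochs, int)]
    epochs = EpochsArray(data, info, events, tmin=0.)
    # 'empirical' is the plain sample covariance; a list such as
    # ['shrunk', 'empirical'] would cross-validate and keep the best one.
    return compute_covariance(epochs, method='empirical')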
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list,
verbose):
"""docstring for _compute_covariance_auto."""
try:
from sklearn.model_selection import GridSearchCV
except Exception: # XXX support sklearn < 0.18
from sklearn.grid_search import GridSearchCV
from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
EmpiricalCovariance)
# rescale to improve numerical stability
_apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
_RegCovariance, _ShrunkCovariance = _get_covariance_classes()
for this_method in method:
data_ = data.copy()
name = this_method.__name__ if callable(this_method) else this_method
logger.info(msg % name.upper())
if this_method == 'empirical':
est = EmpiricalCovariance(**method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'diagonal_fixed':
est = _RegCovariance(info=info, **method_params[this_method])
est.fit(data_)
_info = None
estimator_cov_info.append((est, est.covariance_, _info))
elif this_method == 'ledoit_wolf':
shrinkages = []
lw = LedoitWolf(**method_params[this_method])
for ch_type, picks in picks_list:
lw.fit(data_[:, picks])
shrinkages.append((
ch_type,
lw.shrinkage_,
picks
))
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'shrunk':
shrinkage = method_params[this_method].pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
tuned_parameters, cv=cv)
for ch_type, picks in picks_list:
gs.fit(data_[:, picks])
shrinkages.append((
ch_type,
gs.best_estimator_.shrinkage,
picks
))
shrinkages = [c[0] for c in zip(shrinkages)]
sc = _ShrunkCovariance(shrinkage=shrinkages,
**method_params[this_method])
sc.fit(data_)
_info = None
estimator_cov_info.append((sc, sc.covariance_, _info))
elif this_method == 'pca':
mp = method_params[this_method]
pca, _info = _auto_low_rank_model(data_, this_method,
n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
elif this_method == 'factor_analysis':
mp = method_params[this_method]
fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
else:
raise ValueError('Oh no! Your estimator does not have'
' a .fit method')
logger.info('Done.')
logger.info('Using cross-validation to select the best estimator.')
estimators, _, _ = zip(*estimator_cov_info)
logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])
# undo scaling
for c in estimator_cov_info:
_undo_scaling_cov(c[1], picks_list, scalings)
out = dict()
estimators, covs, runtime_infos = zip(*estimator_cov_info)
cov_methods = [c.__name__ if callable(c) else c for c in method]
runtime_infos, covs = list(runtime_infos), list(covs)
my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
for this_method, runtime_info, loglik, data, est in my_zip:
out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
if runtime_info is not None:
out[this_method].update(runtime_info)
return out
def _logdet(A):
"""Compute the log det of a symmetric matrix."""
vals = linalg.eigh(A)[0]
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
# compute empirical covariance of the test set
precision = est.get_precision()
n_samples, n_features = X.shape
log_like = np.zeros(n_samples)
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
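# A small numerical check of the scorer above: for a zero-mean Gaussian
# model, the mean of ``scipy.stats.multivariate_normal.logpdf`` over the
# samples should match ``_gaussian_loglik_scorer`` for an estimator fit with
# ``assume_centered=True``. Random data, illustration only.
def _loglik_scorer_sketch(n_samples=200, n_features=3):
    from scipy.stats import multivariate_normal
    from sklearn.covariance import EmpiricalCovariance
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    est = EmpiricalCovariance(assume_centered=True).fit(X)
    score = _gaussian_loglik_scorer(est, X)
    reference = multivariate_normal(mean=np.zeros(n_features),
                                    cov=est.covariance_).logpdf(X).mean()
    assert np.allclose(score, reference)
    return score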
def _cross_val(data, est, cv, n_jobs):
"""Helper to compute cross validation."""
try:
from sklearn.model_selection import cross_val_score
except ImportError:
# XXX support sklearn < 0.18
from sklearn.cross_validation import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""compute latent variable models."""
method_params = cp.deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
elif mode == 'pca':
est = PCA
else:
raise ValueError('Come on, this is not a low rank estimator: %s' %
mode)
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(cp.deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
stop_early is True):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
    # happens if rank is too low right from the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
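# A stripped-down sketch of the rank search above: score FactorAnalysis
# models by cross-validated Gaussian log-likelihood for a few candidate
# ranks and keep the best one. It reuses ``_cross_val`` on random low-rank
# data; the real function adds early stopping and runtime book-keeping.
def _rank_search_sketch(n_samples=300, n_features=20, true_rank=5):
    from sklearn.decomposition import FactorAnalysis
    rng = np.random.RandomState(0)
    latent = rng.randn(n_samples, true_rank)
    mixing = rng.randn(true_rank, n_features)
    data = np.dot(latent, mixing) + 0.1 * rng.randn(n_samples, n_features)
    candidate_ranks = [2, 5, 10, 15]
    scores = [_cross_val(data, FactorAnalysis(n_components=n), cv=3, n_jobs=1)
              for n in candidate_ranks]
    best = candidate_ranks[int(np.argmax(scores))]
    return best, scores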
def _get_covariance_classes():
"""Prepare special cov estimators."""
from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
ShrunkCovariance)
class _RegCovariance(EmpiricalCovariance):
"""Aux class."""
def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
store_precision=False, assume_centered=False):
self.info = info
self.grad = grad
self.mag = mag
self.eeg = eeg
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
EmpiricalCovariance.fit(self, X)
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(
data=self.covariance_, names=self.info['ch_names'],
bads=self.info['bads'], projs=self.info['projs'],
nfree=len(self.covariance_))
cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
eeg=self.eeg, proj=False,
exclude='bads') # ~proj == important!!
self.covariance_ = cov_.data
return self
class _ShrunkCovariance(ShrunkCovariance):
"""Aux class."""
def __init__(self, store_precision, assume_centered, shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
EmpiricalCovariance.fit(self, X)
cov = self.covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples
is the number of samples and n_features is the number of
features. X_test is assumed to be drawn from the same
distribution as the data used in fit (including centering).
            y : not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
from sklearn.covariance import empirical_covariance, log_likelihood
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.get_precision())
return res
return _RegCovariance, _ShrunkCovariance
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : string
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix
See Also
--------
read_cov
"""
cov.save(fname)
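# Illustrative round-trip sketch (not part of the API surface): the file name
# below is hypothetical; any path ending in -cov.fif or -cov.fif.gz works.
# Kept as a comment so importing this module has no side effects.
#
#     cov = read_cov('sample_audvis-cov.fif')
#     write_cov('sample_audvis_copy-cov.fif', cov)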
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
""""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = linalg.eigh(A, overwrite_a=True)
eigvec = eigvec.T
eig[:-rank] = 0.0
logger.info('Setting small %s eigenvalues to zero.' % ch_type)
if not pca: # No PCA case.
logger.info('Not doing PCA for %s.' % ch_type)
else:
logger.info('Doing PCA for %s.' % ch_type)
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec
@verbose
def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
scalings=None, verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list
The channel names to be considered.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
if noise_cov['diag'] is False:
C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
else:
C = np.diag(noise_cov.data[C_ch_idx])
scalings = _handle_default('scalings_cov_rank', scalings)
# Create the projection operator
proj, ncomp, _ = make_projector(info['projs'], ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
pick_meg = pick_types(info, meg=True, eeg=False, ref_meg=False,
exclude='bads')
pick_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]
has_meg = len(C_meg_idx) > 0
has_eeg = len(C_eeg_idx) > 0
# Get the specified noise covariance rank
if rank is not None:
if isinstance(rank, dict):
rank_meg = rank.get('meg', None)
rank_eeg = rank.get('eeg', None)
else:
rank_meg = int(rank)
rank_eeg = None
else:
rank_meg, rank_eeg = None, None
if has_meg:
C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
this_info = pick_info(info, pick_meg)
if rank_meg is None:
if len(C_meg_idx) < len(pick_meg):
this_info = pick_info(info, C_meg_idx)
rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
rank_meg)
if has_eeg:
C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
this_info = pick_info(info, pick_eeg)
if rank_eeg is None:
            if len(C_eeg_idx) < len(pick_eeg):
this_info = pick_info(info, C_eeg_idx)
rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
rank_eeg)
if _needs_eeg_average_ref_proj(info):
warn('No average EEG reference present in info["projs"], covariance '
'may be adversely affected. Consider recomputing covariance using'
' a raw file with an average eeg reference projector added.')
n_chan = len(ch_names)
eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
eig = np.zeros(n_chan, dtype=np.float)
if has_meg:
eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
eig[C_meg_idx] = C_meg_eig
if has_eeg:
eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
eig[C_eeg_idx] = C_eeg_eig
assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)
noise_cov = cp.deepcopy(noise_cov)
noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
diag=False, names=ch_names)
return noise_cov
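# Hypothetical usage sketch: restrict a noise covariance to the channels of an
# evoked response and attach the eigen-decomposition used later for whitening
# (``evoked`` and ``noise_cov`` are assumed objects, not defined here).
#
#     cov_prep = prepare_noise_cov(noise_cov, evoked.info, evoked.ch_names)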
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
**Note:** This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
`compute_covariance` to directly combine estimation with regularization
    in a data-driven fashion; see the
`faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
        List of channels to mark as bad. If 'bads', bad channels
are extracted from both info['bads'] and cov['bads'].
    proj : bool (default True)
        Apply projections to keep the rank of the data.
verbose : bool | str | int | None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
compute_covariance
""" # noqa
cov = cp.deepcopy(cov)
info._check_consistency()
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
info_ch_names = info['ch_names']
ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
ch_names_mag = [info_ch_names[i] for i in sel_mag]
ch_names_grad = [info_ch_names[i] for i in sel_grad]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
idx_eeg, idx_mag, idx_grad = [], [], []
for i, ch in enumerate(ch_names):
if ch in ch_names_eeg:
idx_eeg.append(i)
elif ch in ch_names_mag:
idx_mag.append(i)
elif ch in ch_names_grad:
idx_grad.append(i)
else:
            raise Exception('channel %s is of unknown type' % ch)
C = cov_good['data']
assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
('GRAD', idx_grad, grad)]:
if len(idx) == 0 or reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
if proj:
this_ch_names = [ch_names[k] for k in idx]
P, ncomp, _ = make_projector(projs, this_ch_names)
U = linalg.svd(P)[0][:, :-ncomp]
if ncomp > 0:
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
if proj and ncomp > 0:
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
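# Minimal usage sketch (``noise_cov`` and ``raw`` are hypothetical objects):
# apply a smaller regularization factor to MEG channels than the 0.1 default.
#
#     reg_cov = regularize(noise_cov, raw.info, mag=0.05, grad=0.05, eeg=0.1)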
def _regularized_covariance(data, reg=None):
"""Compute a regularized covariance from data using sklearn.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
Data for covariance estimation.
reg : float | str | None (default None)
If not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
if reg is None:
# compute empirical covariance
cov = np.cov(data)
else:
no_sklearn_err = ('the scikit-learn package is missing and '
'required for covariance regularization.')
# use sklearn covariance estimators
if isinstance(reg, float):
if (reg < 0) or (reg > 1):
raise ValueError('0 <= shrinkage <= 1 for '
'covariance regularization.')
try:
import sklearn
sklearn_version = LooseVersion(sklearn.__version__)
from sklearn.covariance import ShrunkCovariance
except ImportError:
raise Exception(no_sklearn_err)
if sklearn_version < '0.12':
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False)
else:
# init sklearn.covariance.ShrunkCovariance estimator
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False,
assume_centered=True)
elif isinstance(reg, string_types):
if reg == 'ledoit_wolf':
try:
from sklearn.covariance import LedoitWolf
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.LedoitWolf estimator
skl_cov = LedoitWolf(store_precision=False,
assume_centered=True)
elif reg == 'oas':
try:
from sklearn.covariance import OAS
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.OAS estimator
skl_cov = OAS(store_precision=False,
assume_centered=True)
else:
raise ValueError("regularization parameter should be "
"'ledoit_wolf' or 'oas'")
else:
raise ValueError("regularization parameter should be "
"of type str or int (got %s)." % type(reg))
# compute regularized covariance using sklearn
cov = skl_cov.fit(data.T).covariance_
return cov
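# Sketch of the three accepted ``reg`` forms for the private helper above,
# with ``data`` a hypothetical (n_channels, n_times) array:
#
#     cov_emp = _regularized_covariance(data)                    # plain np.cov
#     cov_fix = _regularized_covariance(data, reg=0.1)           # fixed shrinkage
#     cov_lw = _regularized_covariance(data, reg='ledoit_wolf')  # data-driven shrinkage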
@verbose
def compute_whitener(noise_cov, info, picks=None, rank=None,
scalings=None, verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict
The measurement info.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
W : 2d array
The whitening matrix.
ch_names : list
The channel names.
"""
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
ch_names = [info['chs'][k]['ch_name'] for k in picks]
noise_cov = cp.deepcopy(noise_cov)
noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
rank=rank, scalings=scalings)
n_chan = len(ch_names)
W = np.zeros((n_chan, n_chan), dtype=np.float)
#
# Omit the zeroes due to projection
#
eig = noise_cov['eig']
nzero = (eig > 0)
W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])
#
# Rows of eigvec are the eigenvectors
#
W = np.dot(W, noise_cov['eigvec'])
W = np.dot(noise_cov['eigvec'].T, W)
return W, ch_names
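# The whitener assembled above is W = V.T @ diag(1 / sqrt(eig)) @ V, where the
# rows of V are the eigenvectors stored in noise_cov['eigvec'] and zeroed
# eigenvalues (projected-out directions) contribute nothing. A hedged usage
# sketch with hypothetical ``noise_cov``, ``info`` and ``data``:
#
#     W, ch_names = compute_whitener(noise_cov, info)
#     data_white = np.dot(W, data)   # rows of ``data`` ordered as ch_names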
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
evoked : instance of Evoked
The evoked data
noise_cov : instance of Covariance
The noise covariance
picks : array-like of int | None
The channel indices to whiten. Can be None to whiten MEG and EEG
data.
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
rank : None | int | dict (default None)
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = cp.deepcopy(evoked)
if picks is None:
picks = pick_types(evoked.info, meg=True, eeg=True)
W = _get_whitener_data(evoked.info, noise_cov, picks,
diag=diag, rank=rank, scalings=scalings)
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
@verbose
def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
scalings=None, verbose=None):
"""Get whitening matrix for a set of data."""
ch_names = [info['ch_names'][k] for k in picks]
noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
info = pick_info(info, picks)
if diag:
noise_cov = cp.deepcopy(noise_cov)
noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
scalings = _handle_default('scalings_cov_rank', scalings)
W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
return W
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diag = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
from scipy import sparse
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
data.flat[::dim + 1] /= 2.0
diag = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diag = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = read_bad_channels(fid, this)
# Put it together
assert dim == len(data)
assert data.ndim == (1 if diag else 2)
cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
if limited:
del cov['kind'], cov['dim'], cov['diag']
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
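# Worked example of the lower-triangle packing used above: a 2 x 2 covariance
# [[a, b], [b, c]] is written as the flat vector [a, b, c], and _read_cov()
# rebuilds the full symmetric matrix from it.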
def _apply_scaling_array(data, picks_list, scalings):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
data *= scalings[:, np.newaxis] # F - order
def _undo_scaling_array(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_array(data, picks_list, scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('scalings must be a dict, ndarray, or None; '
                           'got %s' % type(scalings))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
scalings = dict((k, 1. / v) for k, v in scalings.items())
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, string_types) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
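# Sketch of the ``scalings`` forms handled above (values are illustrative):
#
#     _check_scaling_inputs(data, picks_list, dict(eeg=2e6))  # merged into defaults
#     _check_scaling_inputs(data, picks_list, 'norm')         # per-row norms of data
#     _check_scaling_inputs(data, picks_list, None)           # no rescaling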
def _estimate_rank_meeg_signals(data, info, scalings, tol='auto',
return_singular=False):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape(n_channels, n_samples)
The M/EEG signals.
info : Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e15, grad=1e13, eeg=1e6)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
_apply_scaling_array(data, picks_list, scalings)
if data.shape[1] < data.shape[0]:
ValueError("You've got fewer samples than channels, your "
"rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_array(data, picks_list, scalings)
return out
def _estimate_rank_meeg_cov(data, info, scalings, tol='auto',
return_singular=False):
"""Estimate rank for M/EEG data.
Parameters
----------
data : np.ndarray of float, shape (n_channels, n_channels)
The M/EEG covariance.
info : Info
        The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e12, grad=1e11, eeg=1e5)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', scalings)
_apply_scaling_cov(data, picks_list, scalings)
if data.shape[1] < data.shape[0]:
ValueError("You've got fewer samples than channels, your "
"rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info('estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_cov(data, picks_list, scalings)
return out
|
bsd-3-clause
|
BhallaLab/moose-full
|
moose-examples/paper-2015/Fig2_elecModels/Fig2C.py
|
2
|
13821
|
########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts.
########################################################################
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('../../../Demos/util')
import rdesigneur as rd
import moogli
PI = 3.14159265359
useGssa = True
combineSegments = True
# Pick your favourite cell here.
#elecFileName = "ca1_minimal.p"
## Cell morphology from Bannister and Larkman J Neurophys 2015/NeuroMorpho
elecFileName = "h10.CNG.swc"
#elecFileName = "CA1.morph.xml"
#elecFileName = "VHC-neuron.CNG.swc"
synSpineList = []
synDendList = []
probeInterval = 0.1
probeAmplitude = 1.0
tetanusFrequency = 100.0
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
frameRunTime = 1e-3 # 1 ms
baselineTime = 0.05
tetTime = 0.01
postTetTime = 0.01
runtime = baselineTime + tetTime + postTetTime
def buildRdesigneur():
'''
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
'''
cellProto = [ [ "./cells/" + elecFileName, "elec" ] ]
chanProto = [
['./chans/hd.xml'], \
['./chans/kap.xml'], \
['./chans/kad.xml'], \
['./chans/kdr.xml'], \
['./chans/na3.xml'], \
['./chans/nax.xml'], \
['./chans/CaConc.xml'], \
['./chans/Ca.xml'], \
['./chans/NMDA.xml'], \
['./chans/Glu.xml'] \
]
spineProto = [ \
['makeSpineProto()', 'spine' ]
]
chemProto = []
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are:
# p, g, L, len, dia, maxP, maxG, maxL.
# where
# p = path distance from soma, threaded along dendrite
# g = geometrical distance from soma (shortest distance)
# L = electrotonic distance from soma: number of length constants
# len = length of dendritic compartment
# dia = diameter of dendritic compartment
# maxP = maximal value of 'p' for the cell
# maxG = maximal value of 'g' for the cell
# maxL = maximal value of 'L' for the cell
#
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
"Em", "-58e-3", "initVm", "-65e-3" ], \
[ ".", "#axon#", "RA", "0.5" ] \
]
chanDistrib = [ \
["hd", "#dend#,#apical#", "Gbar", "5e-2*(1+(p*3e4))" ], \
["kdr", "#", "Gbar", "p < 50e-6 ? 500 : 100" ], \
["na3", "#soma#,#dend#,#apical#", "Gbar", "250" ], \
["nax", "#soma#,#axon#", "Gbar", "1250" ], \
["kap", "#axon#,#soma#", "Gbar", "300" ], \
["kap", "#dend#,#apical#", "Gbar", \
"300*(H(100-p*1e6)) * (1+(p*1e4))" ], \
["Ca_conc", "#soma#,#dend#,#apical#", "tau", "0.0133" ], \
["kad", "#soma#,#dend#,#apical#", "Gbar", \
"300*H(p - 100e-6)*(1+p*1e4)" ], \
["Ca", "#dend#,#apical#", "Gbar", "p<160e-6? 10+ p*0.25e-6 : 50" ], \
["Ca", "#soma#", "Gbar", "10" ], \
["glu", "#dend#,#apical#", "Gbar", "200*H(p-200e-6)" ], \
["NMDA", "#dend#,#apical#", "Gbar", "2*H(p-200e-6)" ] \
]
spineDistrib = [ \
["spine", '#apical#', "spineSpacing", "20e-6", \
"spineSpacingDistrib", "2e-6", \
"angle", "0", \
"angleDistrib", str( 2*PI ), \
"size", "1", \
"sizeDistrib", "0.5" ] \
]
chemDistrib = []
######################################################################
# Here we define the mappings across scales. Format:
# sourceObj sourceField destObj destField offset scale
# where the coupling expression is anything a muParser can evaluate,
# using the input variable x. For example: 8e-5 + 300*x
# For now, let's use existing adaptors which take an offset and scale.
######################################################################
adaptorList = []
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
rd.addSpineProto() # This adds a version with an LCa channel by default.
rdes = rd.rdesigneur(
useGssa = useGssa, \
combineSegments = combineSegments, \
stealCellFromLibrary = True, \
passiveDistrib = passiveDistrib, \
spineDistrib = spineDistrib, \
chanDistrib = chanDistrib, \
chemDistrib = chemDistrib, \
cellProto = cellProto, \
chanProto = chanProto, \
chemProto = chemProto, \
adaptorList = adaptorList
)
#spineProto = spineProto, \
return rdes
def buildPlots( rdes ):
graphs = moose.Neutral( '/graphs' )
vtab = moose.Table( '/graphs/VmTab' )
moose.connect( vtab, 'requestOut', rdes.soma, 'getVm' )
def displayPlots():
pylab.figure(1, figsize = (8,10 ) )
pylab.subplot( 1,1,1)
for i in moose.wildcardFind( "/graphs/#VmTab" ):
t = numpy.arange( 0, i.vector.size, 1 ) * i.dt
pylab.plot( t, i.vector, label = i.name )
pylab.xlabel( "Time (s)" )
pylab.legend()
pylab.title( 'Vm' )
pylab.figure(2, figsize= (8,10))
ax = pylab.subplot( 1,1,1 )
neuron = moose.element( '/model/elec' )
comptDistance = dict( zip( neuron.compartments, neuron.pathDistanceFromSoma ) )
for i in moose.wildcardFind( '/library/#[ISA=ChanBase]' ):
chans = moose.wildcardFind( '/model/elec/#/' + i.name )
print i.name, len( chans )
p = [ 1e6*comptDistance.get( j.parent, 0) for j in chans ]
Gbar = [ j.Gbar/(j.parent.length * j.parent.diameter * PI) for j in chans ]
if len( p ) > 2:
pylab.plot( p, Gbar, linestyle = 'None', marker = ".", label = i.name )
sortedGbar = sorted(zip(p, Gbar), key=lambda x: x[0])
ax.set_yscale( 'log' )
pylab.xlabel( "Distance from soma (microns)" )
pylab.ylabel( "Channel density (Seimens/sq mtr)" )
pylab.legend()
pylab.title( 'Channel distribution' )
pylab.show()
def create_vm_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.0,
1.0,
1.0),
moogli.colors.Color(1.0,
1.0,
0.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in network.shapes.keys()]
network.set("color", vms, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
vms = [moose.element(x).Vm for x in network.shapes.keys()]
network.set("color", vms, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
def postlude(view):
displayPlots()
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(network.shapes.values())
view = moogli.View("vm-view",
prelude=prelude,
interlude=interlude,
postlude=postlude)
viewer.attach_view(view)
return viewer
def create_ca_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path)
ca_elements = []
for compartment_path in network.shapes.keys():
if moose.exists(compartment_path + '/Ca_conc'):
ca_elements.append(moose.element(compartment_path + '/Ca_conc'))
else:
ca_elements.append(moose.element('/library/Ca_conc'))
normalizer = moogli.utilities.normalizer(0.0,
0.002,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(1.0,
0.0,
0.0,
1.0),
moogli.colors.Color(0.0,
1.0,
1.0,
0.1)])
mapper = moogli.utilities.mapper(colormap, normalizer)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
def prelude(view):
view.pitch(PI/2)
view.zoom(0.4)
def interlude(view):
moose.start(frameRunTime)
cas = [element.Ca for element in ca_elements]
network.set("color", cas, mapper)
view.yaw(0.01)
currTime = moose.element('/clock').currentTime
if currTime < runtime:
deliverStim(currTime)
else:
view.stop()
viewer = moogli.Viewer("ca-viewer")
viewer.attach_shapes(network.shapes.values())
view = moogli.View("ca-view",
prelude=prelude,
interlude=interlude)
viewer.attach_view(view)
return viewer
def build3dDisplay(rdes):
print "building 3d Display"
app = QtGui.QApplication(sys.argv)
vm_viewer = create_vm_viewer(rdes)
vm_viewer.resize(700, 900)
vm_viewer.show()
vm_viewer.start()
ca_viewer = create_ca_viewer(rdes)
ca_viewer.resize(700, 900)
ca_viewer.show()
ca_viewer.start()
return app.exec_()
def deliverStim( currTime ):
if currTime > baselineTime and currTime < baselineTime + tetTime:
# deliver tet stim
step = int ( (currTime - baselineTime) / frameRunTime )
tetStep = int( 1.0 / (tetanusFrequency * frameRunTime ) )
if step % tetStep == 0:
for i in synDendList:
i.activation( tetanusAmplitude )
for i in synSpineList:
i.activation( tetanusAmplitudeForSpines )
else:
# deliver probe stim
step = int (currTime / frameRunTime )
probeStep = int( probeInterval / frameRunTime )
if step % probeStep == 0:
print "Doing probe Stim at ", currTime
for i in synSpineList:
i.activation( probeAmplitude )
def main():
global synSpineList
global synDendList
numpy.random.seed( 1234 )
rdes = buildRdesigneur()
rdes.buildModel( '/model' )
assert( moose.exists( '/model' ) )
synSpineList = moose.wildcardFind( "/model/elec/#head#/glu,/model/elec/#head#/NMDA" )
temp = set( moose.wildcardFind( "/model/elec/#/glu,/model/elec/#/NMDA" ) )
synDendList = list( temp - set( synSpineList ) )
print "num spine, dend syns = ", len( synSpineList ), len( synDendList )
moose.reinit()
#for i in moose.wildcardFind( '/model/elec/#apical#/#[ISA=CaConcBase]' ):
#print i.path, i.length, i.diameter, i.parent.length, i.parent.diameter
buildPlots(rdes)
# Run for baseline, tetanus, and post-tetanic settling time
t1 = time.time()
build3dDisplay(rdes)
print 'real time = ', time.time() - t1
if __name__ == '__main__':
main()
|
gpl-2.0
|
pandas-ml/pandas-ml
|
pandas_ml/skaccessors/test/test_cross_decomposition.py
|
2
|
5244
|
#!/usr/bin/env python
import pytest
import numpy as np
import pandas as pd
import sklearn.cross_decomposition as cd
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestCrossDecomposition(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.cross_decomposition.PLSRegression, cd.PLSRegression)
self.assertIs(df.cross_decomposition.PLSCanonical, cd.PLSCanonical)
self.assertIs(df.cross_decomposition.CCA, cd.CCA)
self.assertIs(df.cross_decomposition.PLSSVD, cd.PLSSVD)
def test_CCA(self):
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
df = pdml.ModelFrame(X, target=Y)
mod1 = df.cross_decomposition.CCA(n_components=1)
mod2 = cd.CCA(n_components=1)
df.fit(mod1)
mod2.fit(X, Y)
# 2nd cols are different on travis-CI
self.assert_numpy_array_almost_equal(mod1.x_weights_[:, 0],
mod2.x_weights_[:, 0])
self.assert_numpy_array_almost_equal(mod1.y_weights_[:, 0],
mod2.y_weights_[:, 0])
result = df.transform(mod1)
expected = mod2.transform(X, Y)
self.assertIsInstance(result, pdml.ModelFrame)
self.assert_numpy_array_almost_equal(result.data.values.reshape(4),
expected[0].reshape(4))
self.assert_numpy_array_almost_equal(result.target.values.reshape(4),
expected[1].reshape(4))
@pytest.mark.parametrize("algo", ['CCA', 'PLSCanonical'])
def test_CCA_PLSCannonical(self, algo):
n = 500
with tm.RNGContext(1):
            # 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
train = pdml.ModelFrame(X_train, target=Y_train)
test = pdml.ModelFrame(X_test, target=Y_test)
# check multi target columns
self.assertTrue(train.has_target())
tm.assert_numpy_array_equal(train.data.values, X_train)
tm.assert_numpy_array_equal(train.target.values, Y_train)
tm.assert_numpy_array_equal(test.data.values, X_test)
tm.assert_numpy_array_equal(test.target.values, Y_test)
expected = pd.MultiIndex.from_tuples([('.target', 0), ('.target', 1),
('.target', 2), ('.target', 3)])
tm.assert_index_equal(train.target_name, expected)
self.assertEqual(train.data.shape, X_train.shape)
self.assertEqual(train.target.shape, Y_train.shape)
mod1 = getattr(train.cross_decomposition, algo)(n_components=2)
mod2 = getattr(cd, algo)(n_components=2)
train.fit(mod1)
mod2.fit(X_train, Y_train)
# 2nd cols are different on travis-CI
self.assert_numpy_array_almost_equal(mod1.x_weights_[:, 0],
mod2.x_weights_[:, 0])
self.assert_numpy_array_almost_equal(mod1.y_weights_[:, 0],
mod2.y_weights_[:, 0])
result_tr = train.transform(mod1)
result_test = test.transform(mod1)
expected_tr = mod2.transform(X_train, Y_train)
expected_test = mod2.transform(X_test, Y_test)
self.assertIsInstance(result_tr, pdml.ModelFrame)
self.assertIsInstance(result_test, pdml.ModelFrame)
self.assert_numpy_array_almost_equal(result_tr.data.values[:, 0],
expected_tr[0][:, 0])
self.assert_numpy_array_almost_equal(result_tr.target.values[:, 0],
expected_tr[1][:, 0])
self.assert_numpy_array_almost_equal(result_test.data.values[:, 0],
expected_test[0][:, 0])
self.assert_numpy_array_almost_equal(result_test.target.values[:, 0],
expected_test[1][:, 0])
def test_PLSRegression(self):
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
        # each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
df = pdml.ModelFrame(X, target=Y)
pls1 = df.cross_decomposition.PLSRegression(n_components=3)
df.fit(pls1)
result = df.predict(pls1)
pls2 = cd.PLSRegression(n_components=3)
pls2.fit(X, Y)
expected = pls2.predict(X)
self.assertIsInstance(result, pdml.ModelFrame)
self.assert_numpy_array_almost_equal(result.values, expected)
|
bsd-3-clause
|
cybernet14/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
astroML/astroML
|
astroML/density_estimation/density_estimation.py
|
2
|
3150
|
"""
Tools for density estimation
See also:
- sklearn.mixture.gmm : gaussian mixture models
- sklearn.neighbors.KernelDensity : Kernel Density Estimation (version 0.14+)
- astroML.density_estimation.XDGMM : extreme deconvolution
- scipy.spatial.gaussian_kde : a gaussian KDE implementation
"""
import numpy as np
from scipy import special
from sklearn.base import BaseEstimator
from sklearn.neighbors import BallTree
def n_volume(r, n):
"""compute the n-volume of a sphere of radius r in n dimensions"""
return np.pi ** (0.5 * n) / special.gamma(0.5 * n + 1) * (r ** n)
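# Sanity check (worked example): for n = 3 this reduces to the familiar
# 4/3 * pi * r**3, e.g. n_volume(1.0, 3) is approximately 4.18879.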
class KNeighborsDensity(BaseEstimator):
"""K-neighbors density estimation
Parameters
----------
method : string
method to use. Must be one of ['simple'|'bayesian'] (see below)
n_neighbors : int
number of neighbors to use
Notes
-----
The two methods are as follows:
- simple:
The density at a point x is estimated by n(x) ~ k / r_k^n
- bayesian:
The density at a point x is estimated by n(x) ~ sum_{i=1}^k[1 / r_i^n].
See Also
--------
KDE : kernel density estimation
"""
def __init__(self, method='bayesian', n_neighbors=10):
if method not in ['simple', 'bayesian']:
raise ValueError("method = %s not recognized" % method)
self.n_neighbors = n_neighbors
self.method = method
def fit(self, X):
"""Train the K-neighbors density estimator
Parameters
----------
X : array_like
array of points to use to train the KDE. Shape is
(n_points, n_dim)
"""
self.X_ = np.atleast_2d(X)
if self.X_.ndim != 2:
raise ValueError('X must be two-dimensional')
self.bt_ = BallTree(self.X_)
return self
def eval(self, X):
"""Evaluate the kernel density estimation
Parameters
----------
X : array_like
array of points at which to evaluate the KDE. Shape is
(n_points, n_dim), where n_dim matches the dimension of
the training points.
Returns
-------
dens : ndarray
array of shape (n_points,) giving the density at each point.
            The returned values are not normalized; they are proportional
            to the local density of training points.
"""
X = np.atleast_2d(X)
if X.ndim != 2:
raise ValueError('X must be two-dimensional')
if X.shape[1] != self.X_.shape[1]:
raise ValueError('dimensions of X do not match training dimension')
dist, ind = self.bt_.query(X, self.n_neighbors, return_distance=True)
k = float(self.n_neighbors)
ndim = X.shape[1]
if self.method == 'simple':
return k / n_volume(dist[:, -1], ndim)
elif self.method == 'bayesian':
# XXX this may be wrong in more than 1 dimension!
return (k * (k + 1) * 0.5 / n_volume(1, ndim)
/ (dist ** ndim).sum(1))
else:
raise ValueError("Unrecognized method '%s'" % self.method)
|
bsd-2-clause
|
uahic/nest-simulator
|
doc/nest_by_example/scripts/one_neuron_with_sine_wave.py
|
13
|
1515
|
# -*- coding: utf-8 -*-
#
# one_neuron_with_sine_wave.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.voltage_trace
nest.ResetKernel()
neuron = nest.Create('iaf_neuron')
sine = nest.Create('ac_generator', 1,
{'amplitude': 100.0,
'frequency': 2.0})
noise = nest.Create('poisson_generator', 2,
[{'rate': 70000.0},
{'rate': 20000.0}])
voltmeter = nest.Create('voltmeter',1,
{'withgid': True})
nest.Connect(sine, neuron)
nest.Connect(voltmeter, neuron)
nest.Connect(noise[:1], neuron, syn_spec={'weight': 1.0, 'delay': 1.0})
nest.Connect(noise[1:], neuron, syn_spec={'weight': -1.0, 'delay': 1.0})
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
import matplotlib.pyplot as plt
plt.savefig('../figures/voltage_trace.eps')
|
gpl-2.0
|
ZYYSzj/Selective-Joint-Fine-tuning
|
caffe_zyyszj/examples/finetune_flickr_style/assemble_data.py
|
38
|
3636
|
#!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
|
bsd-2-clause
|
charlietsai/catmap
|
catmap/analyze/analysis_base.py
|
1
|
34541
|
import catmap
from catmap import ReactionModelWrapper
from catmap.model import ReactionModel as RM
from catmap import griddata
from copy import copy
try:
from scipy.stats import norm
except:
norm = None
from matplotlib.ticker import MaxNLocator
import os
import math
plt = catmap.plt
pickle = catmap.pickle
np = catmap.np
spline = catmap.spline
mtransforms = catmap.mtransforms
basic_colors = [[0, 0, 0], [0, 0, 1], [0.1, 1, 0.1], [1, 0, 0], [0, 1, 1], [1, 0.5, 0], [1, 0.9, 0],
[1, 0, 1], [0, 0.5, 0.5], [0.5, 0.25, 0.15], [0.5, 0.5, 0.5]]
# black,blue,green,red,cyan,orange,yellow,magenta,turquoise,brown,gray
def get_colors(n_colors):
"""
Get n colors from basic_colors.
:param n_colors: Number of colors
:type n_colors: int
"""
if n_colors < len(basic_colors):
return basic_colors[0:n_colors]
else:
longlist = basic_colors * n_colors
return longlist[0:n_colors]
def boltzmann_vector(energy_list, vector_list, temperature):
"""
Create a vector which is a Boltzmann average of the vector_list weighted
with energies in the energy_list.
:param energy_list: List of energies
:type energy_list: list
:param vector_list: List of vectors
    :type vector_list: list
:param temperature: Temperature
    :type temperature: float
"""
def boltzmann_avg(es, ns, T):
"""
Calculate the Boltzmann average
:param es: energies
:type es: iterable
        :param ns: quantities to be Boltzmann-averaged (same length as es)
        :type ns: iterable
        :param T: temperature
        :type T: float
"""
kB = 8.613e-5 # assuming energies are in eV and T is in K
es = [e - min(es) for e in es] # normalize to minimum energy
exp_sum = sum([np.exp(-e / (kB * T)) for e in es])
exp_weighted = [n * np.exp(-e / (kB * T)) / exp_sum for n, e in zip(ns, es)]
Z = sum(exp_weighted)
return Z
vars = zip(*vector_list)
boltz_vec = [boltzmann_avg(energy_list, v, temperature) for v in vars]
return boltz_vec
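# Worked example (illustrative numbers): with energies [0.0, 0.1] eV and
# per-state values [1.0, 0.0] at T = 300 K, exp(-0.1 / (kB * T)) is about
# 0.021, so the Boltzmann-averaged value is roughly 1 / 1.021, i.e. ~0.98:
#
#     boltzmann_vector([0.0, 0.1], [[1.0], [0.0]], 300.0)  # ~[0.98]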
class MapPlot:
"""
Class for generating plots using a dictionary of default plotting attributes.
The following attributes can be modified:
:param resolution_enhancement: Resolution enhancement for interpolated maps
:type resolution_enhancement: int
:param min: Minimum
:type min:
:param max: Maximum
:type max:
:param n_ticks: Number of ticks
:type n_ticks: int
:param descriptor_labels: Label of descriptors
:type descriptor_labels: list
:param default_descriptor_pt_args: Dictionary of descriptor point arguments
:type default_descriptor_pt_args: dict
:param default_descriptor_label_args: Dictionary of descriptor labels
:type default_descriptor_label_args: dict
:param descriptor_pt_args:
:type descriptor_pt_args: dict
:param include_descriptors: Include the descriptors
:type include_descriptors: bool
:param plot_size: Size of the plot
:type plot_size: int
:param aspect:
:type aspect:
:param subplots_adjust_kwargs: Dictionary of keyword arguments for adjusting matplotlib subplots
:type subplots_adjust_kwargs: dict
.. todo:: Some missing descriptions
"""
def __init__(self):
defaults = dict(
resolution_enhancement=1,
min=None,
max=None,
n_ticks=8,
plot_function=None,
colorbar=True,
colormap=plt.cm.jet,
axis_label_decimals=2,
log_scale=False,
descriptor_labels=['X_descriptor', 'Y_descriptor'],
default_descriptor_pt_args={'marker': 'o'},
default_descriptor_label_args={},
descriptor_pt_args={},
descriptor_label_args={},
include_descriptors=False,
plot_size=4,
aspect=None,
subplots_adjust_kwargs={'hspace': 0.35, 'wspace': 0.35,
'bottom': 0.15}
)
for key in defaults:
val = defaults[key]
if not hasattr(self, key):
setattr(self, key, val)
elif getattr(self, key) is None:
setattr(self, key, val)
def update_descriptor_args(self):
"""
Update descriptor arguments
.. todo:: __doc__
"""
if getattr(self, 'descriptor_dict', None):
if self.descriptor_pt_args == {}:
for pt in self.descriptor_dict:
self.descriptor_pt_args[pt] = copy(
self.default_descriptor_pt_args)
if self.descriptor_label_args == {}:
for pt in self.descriptor_dict:
self.descriptor_label_args[pt] = copy(
self.default_descriptor_label_args)
def plot_descriptor_pts(self, mapp, idx, ax, plot_in=None):
"""
Plot descriptor points
:param mapp:
:type mapp:
:param idx:
:type idx:
:param ax: axes object
:param plot_in:
:type plot_in:
.. todo:: __doc__
"""
if getattr(self, 'descriptor_dict', None):
self.update_descriptor_args()
xy, rates = zip(*mapp)
dim = len(xy[0])
for key in self.descriptor_dict:
pt_kwargs = self.descriptor_pt_args.get(key,
self.default_descriptor_pt_args)
lab_kwargs = self.descriptor_label_args.get(key,
self.default_descriptor_label_args)
if dim == 1: # x will be descriptor values. y will be rate/coverage/etc.
x, y = self.descriptor_dict[key]
y_sp = catmap.spline(plot_in[0], plot_in[1], k=1)
y = y_sp(x)
elif dim == 2:
x, y = self.descriptor_dict[key]
if None not in [x, y]:
if pt_kwargs is not None:
ax.errorbar(x, y, **pt_kwargs)
if lab_kwargs is not None:
ax.annotate(key, [x, y], **lab_kwargs)
if dim == 1:
ax.set_xlim(self.descriptor_ranges[0])
elif dim == 2:
ax.set_xlim(self.descriptor_ranges[0])
ax.set_ylim(self.descriptor_ranges[1])
def plot_single(self, mapp, rxn_index, ax=None,
overlay_map=None, alpha_range=None,
**plot_args):
"""
:param mapp:
:param rxn_index: Index for the reaction
:type rxn_index: int
:param ax: axes object
:param overlay_map:
:type overlay_map:
        :param alpha_range:
:type alpha_range:
.. todo:: __doc__
"""
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
xy, rates = zip(*mapp)
dim = len(xy[0])
if dim == 1:
x = zip(*xy)[0]
descriptor_ranges = [[min(x), max(x)]]
if not self.plot_function:
if self.log_scale == True:
self.plot_function = 'semilogy'
else:
self.plot_function = 'plot'
elif dim == 2:
x, y = zip(*xy)
descriptor_ranges = [[min(x), max(x)], [min(y), max(y)]]
if not self.plot_function:
self.plot_function = 'contourf'
if 'cmap' not in plot_args:
plot_args['cmap'] = self.colormap
eff_res = self.resolution * self.resolution_enhancement
if self.min:
minval = self.min
else:
minval = None
maparray = RM.map_to_array(mapp, descriptor_ranges, eff_res,
log_interpolate=self.log_scale, minval=minval)
if self.max is None:
self.max = maparray.T[rxn_index].max()
if self.min is None:
self.min = maparray.T[rxn_index].min()
if dim == 2:
if maparray.min() <= self.min:
plot_args['extend'] = 'min'
if maparray.max() >= self.max:
plot_args['extend'] = 'max'
if maparray.max() >= self.max and maparray.min() <= self.min:
plot_args['extend'] = 'both'
if 'extend' not in plot_args:
plot_args['extend'] = 'neither'
if self.log_scale and dim == 2:
maparray = np.log10(maparray)
min_val = np.log10(float(self.min))
max_val = np.log10(float(self.max))
if min_val < -200:
min_val = max(maparray.min(), -200)
elif max_val == np.inf:
max_val = min(maparray.max(), 200)
else:
min_val = self.min
max_val = self.max
maparray = np.clip(maparray, min_val, max_val)
log_scale = self.log_scale
if overlay_map:
overlay_array = RM.map_to_array(overlay_map,
descriptor_ranges, eff_res)
if alpha_range:
alpha_min, alpha_max = alpha_range
else:
alpha_min = overlay_array.min()
alpha_max = overlay_array.max()
overlay_array = (overlay_array - overlay_array.min())
overlay_array = overlay_array / (alpha_max - alpha_min)
overlay_array = np.clip(overlay_array, 0, 1)
maparray = np.clip(maparray, min_val, max_val)
norm_array = (maparray - maparray.min())
norm_array = norm_array / (maparray.max() - maparray.min())
            maparray = norm_array * overlay_array
norm_array = (maparray - maparray.min())
norm_array = norm_array / (maparray.max() - maparray.min())
maparray = norm_array * (max_val - min_val) + min_val
if dim == 1:
x_range = descriptor_ranges[0]
plot_in = [np.linspace(*x_range + [eff_res]), maparray[:, rxn_index]]
plot = getattr(ax, self.plot_function)(*plot_in)
elif dim == 2:
x_range, y_range = descriptor_ranges
z = maparray[:, :, rxn_index]
if self.log_scale:
levels = range(int(min_val), int(max_val) + 1)
if len(levels) < 3 * self.n_ticks:
levels = np.linspace(
int(min_val), int(max_val), 3 * self.n_ticks)
else:
levels = np.linspace(min_val, max_val, min(eff_res, 25))
plot_in = [np.linspace(*x_range + [eff_res[0]]),
np.linspace(*y_range + [eff_res[1]]), z, levels]
plot = getattr(ax, self.plot_function)(*plot_in, **plot_args)
pos = ax.get_position()
if self.aspect:
ax.set_aspect(self.aspect)
ax.apply_aspect()
if dim == 1:
ax.set_xlim(descriptor_ranges[0])
ax.set_xlabel(self.descriptor_labels[0])
ax.set_ylim([float(self.min), float(self.max)])
elif dim == 2:
if self.colorbar:
if log_scale: # take only integer tick labels
cbar_nums = range(int(min_val), int(max_val) + 1)
mod = max(int(len(cbar_nums) / self.n_ticks), 1)
cbar_nums = [n for i, n in enumerate(cbar_nums) if not i % mod]
cbar_nums = np.array(cbar_nums)
else:
cbar_nums = np.linspace(min_val, max_val, self.n_ticks)
formatstring = '%.' + str(self.axis_label_decimals) + 'g'
cbar_labels = [formatstring % (s,) for s in cbar_nums]
cbar_labels = [lab.replace('e-0', 'e-').replace('e+0', 'e')
for lab in cbar_labels]
plot.set_clim(min_val, max_val)
fig = ax.get_figure()
axpos = list(ax.get_position().bounds)
xsize = axpos[2] * 0.04
ysize = axpos[3]
xp = axpos[0] + axpos[2] + 0.04 * axpos[2]
yp = axpos[1]
cbar_box = [xp, yp, xsize, ysize]
cbar_ax = fig.add_axes(cbar_box)
cbar = fig.colorbar(mappable=plot, ticks=cbar_nums,
cax=cbar_ax, extend=plot_args['extend'])
cbar.ax.set_yticklabels(cbar_labels)
if getattr(self, 'colorbar_label', None):
cbar_kwargs = getattr(self, 'colorbar_label_kwargs', {'rotation': -90})
cbar_ax.set_ylabel(self.colorbar_label, **cbar_kwargs)
if self.descriptor_labels:
ax.set_xlabel(self.descriptor_labels[0])
ax.set_ylabel(self.descriptor_labels[1])
ax.set_xlim(descriptor_ranges[0])
ax.set_ylim(descriptor_ranges[1])
if 'title' in plot_args and plot_args['title']:
if 'title_size' not in plot_args:
n_pts = self.plot_size * 72
font_size = min([n_pts / len(plot_args['title']), 14])
else:
font_size = plot_args['title_size']
ax.set_title(plot_args['title'], size=font_size)
if getattr(self, 'n_xticks', None):
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if getattr(self, 'n_yticks', None):
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
self.plot_descriptor_pts(mapp, rxn_index, ax=ax, plot_in=plot_in)
return ax
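# --- Illustrative sketch; not part of the original CatMAP source. ---
# The overlay branch in plot_single rescales the rate map to [0, 1], weights
# it by the clipped overlay "alpha" array, and maps the product back onto
# [min_val, max_val]. A minimal standalone version of that normalization,
# assuming plain NumPy arrays for the map and the overlay:
def _overlay_blend_example(maparray, overlay_array, min_val, max_val):
    """Blend a map with an overlay in [0, 1] and rescale to [min_val, max_val]."""
    maparray = np.clip(maparray, min_val, max_val)
    norm = (maparray - maparray.min()) / (maparray.max() - maparray.min())
    blended = norm * overlay_array
    blended = (blended - blended.min()) / (blended.max() - blended.min())
    return blended * (max_val - min_val) + min_val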
def plot_separate(self, mapp, ax_list=None, indices=None,
overlay_map=None, **plot_single_kwargs):
"""
Generate separate plots
.. todo:: __doc__
"""
pts, rates = zip(*mapp)
if indices is None:
indices = range(0, len(rates[0]))
n_plots = len(indices)
if not ax_list:
x = int(np.sqrt(n_plots))
if x * x < n_plots:
y = x + 1
else:
y = x
if x * y < n_plots:
x = x + 1
if self.colorbar:
fig = plt.figure(
figsize=(y * self.plot_size * 1.25, x * self.plot_size))
else:
fig = plt.figure(figsize=(y * self.plot_size, x * self.plot_size))
ax_list = []
for i in range(0, n_plots):
ax_list.append(fig.add_subplot(x, y, i + 1))
else:
fig = ax_list[0].get_figure()
if fig:
fig.subplots_adjust(**self.subplots_adjust_kwargs)
else:
fig = plt.gcf()
fig.subplots_adjust(**self.subplots_adjust_kwargs)
plotnum = 0
old_dict = copy(self.__dict__)
if not self.min or not self.max:
for id, i in enumerate(indices):
pts, datas = zip(*mapp)
dat_min = 1e99
dat_max = -1e99
for col in zip(*datas):
if min(col) < dat_min:
dat_min = min(col)
if max(col) > dat_max:
dat_max = max(col)
if self.min is None:
self.min = dat_min
if self.max is None:
self.max = dat_max
for id, i in enumerate(indices):
kwargs = plot_single_kwargs
if self.map_plot_labels:
try:
kwargs['title'] = self.map_plot_labels[i]
except IndexError:
kwargs['title'] = ''
kwargs['overlay_map'] = overlay_map
self.__dict__.update(old_dict)
self.plot_single(mapp, i, ax=ax_list[plotnum], **kwargs)
plotnum += 1
return fig
def plot_weighted(self, mapp, ax=None, weighting='linear',
second_map=None, indices=None, **plot_args):
"""
Generate weighted plot
:param mapp:
:type mapp:
:param ax: axes object
:param weighting: weighting function, 'linear' or 'dual'.
:type weighting: str
:param second_map:
:param indices:
.. todo:: __doc__
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
if self.color_list is None:
color_list = get_colors(len(mapp[0][-1]) + 1)
color_list.pop(0) # remove black
else:
color_list = self.color_list
pts, datas = zip(*mapp)
if indices is None:
indices = range(0, len(datas[0]))
rgbs = []
datas = zip(*datas)
datas = [d for id, d in enumerate(datas) if id in indices]
datas = zip(*datas)
if second_map:
pts2, datas2 = zip(*second_map)
datas2 = zip(*datas2)
datas2 = [d for id, d in enumerate(datas2) if id in indices]
datas2 = zip(*datas2)
else:
datas2 = datas
for data, data2 in zip(datas, datas2):
if weighting == 'linear':
rs, gs, bs = zip(*color_list)
r = 1 - sum(float((1 - ri) * di) for ri, di in zip(rs, data))
g = 1 - sum(float((1 - gi) * di) for gi, di in zip(gs, data))
b = 1 - sum(float((1 - bi) * di) for bi, di in zip(bs, data))
eff_res = self.resolution * self.resolution_enhancement
rgbs.append([r, g, b])
elif weighting == 'dual':
rs, gs, bs = zip(*color_list)
r = 1 - sum(float((1 - ri) * di * d2i)
for ri, di, d2i in zip(rs, data, data2))
g = 1 - sum(float((1 - gi) * di * d2i)
for gi, di, d2i in zip(gs, data, data2))
b = 1 - sum(float((1 - bi) * di * d2i)
for bi, di, d2i in zip(bs, data, data2))
eff_res = 300
rgbs.append([r, g, b])
r, g, b = zip(*rgbs)
x, y = zip(*pts)
xi = np.linspace(min(x), max(x), eff_res)
yi = np.linspace(min(y), max(y), eff_res)
ri = griddata(x, y, r, xi, yi)
gi = griddata(x, y, g, xi, yi)
bi = griddata(x, y, b, xi, yi)
rgb_array = np.zeros((eff_res, eff_res, 3))
for i in range(0, eff_res):
for j in range(0, eff_res):
rgb_array[i, j, 0] = ri[i, j]
rgb_array[i, j, 1] = gi[i, j]
rgb_array[i, j, 2] = bi[i, j]
xminmax, yminmax = self.descriptor_ranges
xmin, xmax = xminmax
ymin, ymax = yminmax
ax.imshow(rgb_array, extent=[xmin, xmax, ymin, ymax], origin='lower')
self.plot_descriptor_pts(mapp, i, ax)
if getattr(self, 'n_xticks', None):
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if getattr(self, 'n_yticks', None):
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
ax.set_xlabel(self.descriptor_labels[0])
ax.set_ylabel(self.descriptor_labels[1])
if self.aspect:
ax.set_aspect(self.aspect)
ax.apply_aspect()
return fig
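# --- Illustrative sketch; not part of the original CatMAP source. ---
# The 'linear' weighting above mixes one RGB colour per data channel: each
# channel pulls the pixel away from white in proportion to its weight,
# r = 1 - sum((1 - r_i) * d_i), and likewise for g and b. A tiny worked
# example with a pure-red and a pure-blue channel:
def _linear_weighting_example():
    color_list = [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]
    data = (0.25, 0.75)  # channel weights, summing to 1
    rs, gs, bs = zip(*color_list)
    r = 1 - sum((1 - ri) * di for ri, di in zip(rs, data))  # 0.25
    g = 1 - sum((1 - gi) * di for gi, di in zip(gs, data))  # 0.0
    b = 1 - sum((1 - bi) * di for bi, di in zip(bs, data))  # 0.75
    return r, g, b  # mostly blue, matching the larger blue weight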
def save(self, fig, save=True, default_name='map_plot.pdf'):
"""
:param fig: figure object
:param save: save the figure
:type save: bool
:param default_name: default name for the saved figure.
:type default_name: str
"""
if save == True:
if not hasattr(self, 'output_file'):
save = default_name
else:
save = self.output_file
if save:
fig.savefig(save)
class MechanismPlot:
"""
Class for generating potential energy diagrams
:param energies: list of energies
:type energies: list
:param barriers: list of barriers
:type barriers: list
:param labels: list of labels
:type labels: list
"""
def __init__(self, energies, barriers=[], labels=[]):
self.energies = energies
self.barriers = barriers
self.labels = labels
self.energy_line_args = {'color': 'k', 'lw': 2}
self.barrier_line_args = {'color': 'k', 'lw': 2}
self.label_args = {'color': 'k', 'size': 16, 'rotation': 45}
self.label_positions = None
self.initial_energy = 0
self.initial_stepnumber = 0
self.energy_mode = 'relative' # absolute
self.energy_line_widths = 0.5
def draw(self, ax=None):
"""
Draw the potential energy diagram
.. todo:: __doc__
"""
def attr_to_list(attrname, required_length=len(self.energies)):
"""
Return list of attributes
:param attrname: Name of attributes
:type attrname: list
:param required_length: Required length for the list of attributes
:type required_length: int
.. todo:: __doc__
"""
try:
getattr(self, attrname)[0] # Ensure that it is a list
iter(getattr(self, attrname)) # Ensure that it is iterable
if len(getattr(self, attrname)) == required_length:
pass
else:
raise ValueError(attrname + ' list is of length ' +
str(len(getattr(self, attrname))) +
', but needs to be of length ' +
str(required_length))
return getattr(self, attrname)
except:
return [getattr(self, attrname)] * required_length
barrier_line_args = attr_to_list('barrier_line_args',
len(self.energies) - 1)
energy_line_widths = attr_to_list('energy_line_widths')
energy_line_args = attr_to_list('energy_line_args')
label_args = attr_to_list('label_args')
label_positions = attr_to_list('label_positions')
# plot energy lines
energy_list = np.array(self.energies)
energy_list = (energy_list - energy_list[0])
energy_list = list(energy_list)
if self.energy_mode == 'relative':
cum_energy = [energy_list[0]]
for i, e in enumerate(energy_list[1:]):
last = cum_energy[i] + e
cum_energy.append(last)
energy_list = cum_energy
energy_list = np.array(energy_list) + self.initial_energy
energy_list = list(energy_list)
energy_lines = [
[[i + self.initial_stepnumber, i + width + self.initial_stepnumber],
[energy_list[i]] * 2]
for i, width in enumerate(energy_line_widths)]
self.energy_lines = energy_lines
for i, line in enumerate(energy_lines):
ax.plot(*line, **energy_line_args[i])
# create barrier lines
barrier_lines = []
if not self.barriers:
self.barriers = [0] * (len(self.energies) - 1)
for i, barrier in enumerate(self.barriers):
xi = energy_lines[i][0][1]
xf = energy_lines[i + 1][0][0]
yi = energy_lines[i][1][0]
yf = energy_lines[i + 1][1][0]
if self.energy_mode == 'relative' and (barrier == 0 or barrier <= yf - yi):
line = [[xi, xf], [yi, yf]]
xts = (xi + xf) / 2.
yts = max([yi, yf])
elif self.energy_mode == 'absolute' and (barrier <= yf or barrier <= yi):
line = [[xi, xf], [yi, yf]]
xts = (xi + xf) / 2.
yts = max([yi, yf])
else:
if self.energy_mode == 'relative':
yts = yi + barrier
elif self.energy_mode == 'absolute':
yts = barrier
barrier = yts - yi
barrier_rev = barrier + (yi - yf)
if barrier > 0 and barrier_rev > 0:
ratio = np.sqrt(barrier) / (np.sqrt(barrier) + np.sqrt(barrier_rev))
else:
print('Warning: Encountered barrier less than 0')
ratio = 0.0001
yts = max(yi, yf)
xts = xi + ratio * (xf - xi)
xs = [xi, xts, xf]
ys = [yi, yts, yf]
f = spline(xs, ys, k=2)
newxs = np.linspace(xi, xf, 20)
newys = f(newxs)
line = [newxs, newys]
barrier_lines.append(line)
self.barrier_lines = barrier_lines
# plot barrier lines
for i, line in enumerate(barrier_lines):
ax.plot(*line, **barrier_line_args[i])
# add labels
trans = ax.get_xaxis_transform()
for i, label in enumerate(self.labels):
xpos = sum(energy_lines[i][0]) / len(energy_lines[i][0])
label_position = label_positions[i]
args = label_args[i]
if label_position in ['top', 'ymax']:
if 'ha' not in args:
args['ha'] = 'left'
if 'va' not in args:
args['va'] = 'bottom'
ypos = 1
args['transform'] = trans
ax.text(xpos, ypos, label, **args)
elif label_position in ['bot', 'bottom', 'ymin']:
ypos = -0.1
ax.xaxis.set_ticks([float(sum(line[0]) / len(line[0]))
for line in energy_lines])
ax.set_xticklabels(self.labels)
for attr in args.keys():
try:
[getattr(t, 'set_' + attr)(args[attr])
for t in ax.xaxis.get_ticklabels()]
except:
pass
elif label_position in ['omit']:
pass
else:
ypos = energy_lines[i][1][0]
if 'ha' not in args: # and 'textcoords' not in args:
args['ha'] = 'left'
if 'va' not in args: # and 'textcoords' not in args:
args['va'] = 'bottom'
ax.annotate(label, [xpos, ypos], **args)
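# --- Illustrative usage sketch; not part of the original CatMAP source. ---
# Minimal use of the MechanismPlot class defined above: three states with a
# single activation barrier between the first two, drawn onto a fresh axes.
# The energies, barrier, and labels below are made up for illustration only.
def _mechanism_plot_example(filename='mechanism_example.pdf'):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    pes = MechanismPlot(energies=[0.0, -0.5, 0.3],
                        barriers=[0.8, 0.0],
                        labels=['IS', 'Int', 'FS'])
    pes.draw(ax=ax)
    fig.savefig(filename)
    return fig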
class ScalingPlot:
"""
:param descriptor_names: list of descriptor names
:type descriptor_names: list
:param descriptor_dict: dictionary of descriptors
:type descriptor_dict: dict
:param surface_names: list of the surface names
:type surface_names: list
:param parameter_dict: dictionary of parameters
:type parameter_dict: dict
:param scaling_function: function to project descriptors into energies.
Should take descriptors as an argument and return a
dictionary of {adsorbate:energy} pairs.
:type scaling_function: function
:param x_axis_function: function to project descriptors onto the x-axis.
Should take descriptors as an argument and return a
dictionary of {adsorbate:x_value} pairs.
:type x_axis_function: function
:param scaling_function_kwargs: keyword arguments for scaling_function.
:type scaling_function_kwargs: dict
:param x_axis_function_kwargs: keyword arguments for x_axis_function.
:type x_axis_function_kwargs: dict
"""
def __init__(self, descriptor_names, descriptor_dict, surface_names,
parameter_dict, scaling_function, x_axis_function,
scaling_function_kwargs={}, x_axis_function_kwargs={},
):
self.descriptor_names = descriptor_names
self.surface_names = surface_names
self.descriptor_dict = descriptor_dict
self.parameter_dict = parameter_dict
self.scaling_function = scaling_function
self.scaling_function_kwargs = scaling_function_kwargs
self.x_axis_function = x_axis_function
self.x_axis_function_kwargs = x_axis_function_kwargs
self.axis_label_size = 16
self.surface_label_size = 16
self.title_size = 18
self.same_scale = True
self.show_titles = True
self.show_surface_labels = True
self.subplots_adjust_kwargs = {'wspace': 0.4, 'hspace': 0.4}
self.x_label_dict = {}
self.y_label_dict = {}
self.surface_colors = []
self.scaling_line_args = {}
self.label_args = {}
self.line_args = {}
self.include_empty = True
self.include_error_histogram = True
def plot(self, ax_list=None, plot_size=4.0, save=None):
"""
:param ax_list: list of axes objects
:type ax_list: [ax]
:param plot_size: size of the plot
:type plot_size: float
:param save: whether or not to save the plot
:type save: bool
.. todo:: __doc__
"""
all_ads = self.adsorbate_names + self.transition_state_names
all_ads = [a for a in all_ads if a in self.parameter_dict.keys() and
a not in self.echem_transition_state_names]
if self.include_empty:
ads_names = all_ads
else:
ads_names = [n for n in all_ads if
(None in self.parameter_dict[n] or
sum(self.parameter_dict[n]) > 0.0)]
if not self.surface_colors:
self.surface_colors = get_colors(len(self.surface_names))
if not self.scaling_line_args:
self.scaling_line_args = [{'color': 'k'}] * len(ads_names)
elif hasattr(self.scaling_line_args, 'update'):  # it is a dictionary
self.scaling_line_args = [self.scaling_line_args] * len(
self.adsorbate_names)
for d in self.descriptor_names:
if not self.include_descriptors:
if d in ads_names:
ads_names.remove(d)
if self.include_error_histogram:
extra = 1
else:
extra = 0
if not ax_list:
spx = round(np.sqrt(len(ads_names) + extra))
spy = round(np.sqrt(len(ads_names) + extra))
if spy * spx < len(ads_names) + extra:
spy += 1
fig = plt.figure(figsize=(spy * plot_size, spx * plot_size))
ax_list = [fig.add_subplot(spx, spy, i + 1)
for i in range(len(ads_names))]
else:
fig = None
all_xs, all_ys = zip(*[self.descriptor_dict[s]
for s in self.surface_names])
fig.subplots_adjust(**self.subplots_adjust_kwargs)
all_ys = []
maxyrange = 0
ymins = []
all_err = []
for i, ads in enumerate(ads_names):
actual_y_vals = self.parameter_dict[ads]
desc_vals = [self.descriptor_dict[s] for s in self.surface_names]
scaled_x_vals = [self.x_axis_function(
d, **self.x_axis_function_kwargs)[0][ads] for d in desc_vals]
label = self.x_axis_function(
desc_vals[0], **self.x_axis_function_kwargs)[-1][ads]
scaled_y_vals = [self.scaling_function(
d, **self.scaling_function_kwargs)[ads] for d in desc_vals]
diffs = [scaled - actual for scaled, actual
in zip(scaled_y_vals, actual_y_vals) if actual != None]
ax = ax_list[i]
m, b = plt.polyfit(scaled_x_vals, scaled_y_vals, 1)
x_vals = np.array([round(min(scaled_x_vals), 1) - 0.1,
round(max(scaled_x_vals), 1) + 0.1])
ax.plot(x_vals, m * x_vals + b, **self.scaling_line_args[i])
err = [yi - (m * xi + b) for xi, yi in zip(scaled_x_vals, actual_y_vals) if yi != None]
all_err += err
ax.set_xlabel(label)
ax.set_ylabel('$E_{' + ads + '}$ [eV]')
num_y_vals = []
# for s,c in zip(self.surface_names,self.surface_colors):
# print s, c
for sf, col, x, y in zip(self.surface_names,
self.surface_colors, scaled_x_vals, actual_y_vals):
if y and y != None:
ax.plot(x, y, 'o', color=col, markersize=10, mec=None)
if self.show_surface_labels:
ax.annotate(sf, [x, y], color=col, **self.label_args)
num_y_vals.append(y)
if self.show_titles:
ax.set_title('$' + ads + '$', size=self.title_size)
all_ys += num_y_vals
if not num_y_vals:
num_y_vals = scaled_y_vals
dy = max(num_y_vals) - min(num_y_vals)
ymins.append([min(num_y_vals), max(num_y_vals)])
if dy > maxyrange:
maxyrange = dy
ax.set_xlim(x_vals)
y_range = [round(min(num_y_vals), 1) - 0.1,
round(max(num_y_vals), 1) + 0.1]
self.scaling_error = all_err
if self.same_scale == True:
for i, ax in enumerate(ax_list):
pad = maxyrange - (ymins[i][1] - ymins[i][0])
y_range = [round(ymins[i][0] - pad, 1) - 0.1,
round(ymins[i][1] + pad, 1) + 0.1]
ax.set_ylim(y_range)
if self.include_error_histogram:
err_ax = fig.add_subplot(spx, spy, len(ads_names) + 1)
err_ax.hist(all_err, bins=15)
err_ax.set_xlabel('$E_{actual} - E_{scaled}$ [eV]')
err_ax.set_ylabel('Counts')
ax_list.append(err_ax)
for ax in ax_list:
if getattr(self, 'n_xticks', None):
ax.xaxis.set_major_locator(MaxNLocator(self.n_xticks))
if getattr(self, 'n_yticks', None):
ax.yaxis.set_major_locator(MaxNLocator(self.n_yticks))
if save is None:
save = self.model_name + '_scaling.pdf'
if save:
fig.savefig(save)
return fig
|
gpl-3.0
|
cpaulik/scipy
|
scipy/signal/spectral.py
|
14
|
34751
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
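# --- Illustrative sketch; not part of the original SciPy module. ---
# As the body above shows, `periodogram` simply defers to `welch` with one
# full-length segment and zero overlap; the helper below checks that
# equivalence for the default boxcar window.
def _periodogram_equals_single_segment_welch(n=1024):
    x = np.random.randn(n)
    f1, P1 = periodogram(x)
    f2, P2 = welch(x, window='boxcar', nperseg=n, noverlap=0)
    return np.allclose(f1, f2) and np.allclose(P1, P2)  # expected: True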
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window, an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
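# --- Illustrative sketch; not part of the original SciPy module. ---
# Welch's method, as described in the docstring above, averages modified
# periodograms of (possibly overlapping) segments. The helper below rebuilds
# that average by hand for the non-overlapping boxcar case and compares it to
# `welch`; it is a sanity sketch, not an alternative implementation.
def _welch_as_mean_of_periodograms(n=4096, nperseg=256):
    x = np.random.randn(n)
    f_ref, P_ref = welch(x, window='boxcar', nperseg=nperseg, noverlap=0,
                         detrend=False)
    segments = x[:n - n % nperseg].reshape(-1, nperseg)
    P_manual = np.mean([periodogram(seg, window='boxcar', detrend=False)[1]
                        for seg in segments], axis=0)
    return np.allclose(P_ref, P_manual)  # expected: True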
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window, an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to Welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
# Less overlap than welch, so samples are more statisically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode='psd')
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
--------
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window, an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
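# --- Illustrative sketch; not part of the original SciPy module. ---
# `coherence` is just the ratio abs(Pxy)**2 / (Pxx * Pyy) assembled from the
# Welch estimates above. The helper below recomputes it directly from `welch`
# and `csd`, and also checks that Cxy == 1 when both inputs are identical.
def _coherence_sanity_sketch(n=2048, nperseg=256):
    x = np.random.randn(n)
    y = np.random.randn(n) + 0.5 * x
    f, Cxy = coherence(x, y, nperseg=nperseg)
    _, Pxx = welch(x, nperseg=nperseg)
    _, Pyy = welch(y, nperseg=nperseg)
    _, Pxy = csd(x, y, nperseg=nperseg)
    same = np.allclose(Cxy, np.abs(Pxy) ** 2 / (Pxx * Pyy))
    unit = np.allclose(coherence(x, x, nperseg=nperseg)[1], 1.0)
    return same and unit  # expected: True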
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
'''
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>
stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
'''
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
_spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
# Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
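# --- Illustrative sketch; not part of the original SciPy module. ---
# The segmentation above relies on the classic stride trick: a
# (n_segments, nperseg) view of the 1-D input is built without copying by
# reusing the array's own strides. A standalone version of just that step:
def _strided_segments_example(x, nperseg, noverlap):
    x = np.asarray(x)
    step = nperseg - noverlap
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
# For example, _strided_segments_example(np.arange(8), nperseg=4, noverlap=2)
# yields [[0 1 2 3], [2 3 4 5], [4 5 6 7]].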
|
bsd-3-clause
|
pianomania/scikit-learn
|
sklearn/utils/random.py
|
46
|
10523
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
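# --- Illustrative sketch; not part of the original scikit-learn module. ---
# The replace=True branch above samples by inverse-CDF lookup: build the
# cumulative distribution of `p` and binary-search uniform draws into it.
# A minimal standalone version of that core step:
def _inverse_cdf_sampling_example(p, size, random_state=None):
    rng = check_random_state(random_state)
    cdf = np.cumsum(p)
    cdf /= cdf[-1]
    return cdf.searchsorted(rng.random_sample(size), side='right')
# e.g. _inverse_cdf_sampling_example([0.1, 0.0, 0.9], size=5, random_state=0)
# returns indices drawn from {0, 2} with probabilities 0.1 and 0.9;
# index 1 never appears because its probability mass is zero.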
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
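# --- Illustrative usage sketch; not part of the original scikit-learn module. ---
# Example call: two output columns, the first drawing from {0, 1, 2} with
# probabilities (0.5, 0.25, 0.25) and the second drawing class 3 with
# probability 0.3 (and 0 otherwise). The result is a (n_samples, 2) CSC
# matrix whose implicit zeros encode class 0.
def _random_choice_csc_example():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    probabilities = [np.array([0.5, 0.25, 0.25]), np.array([0.7, 0.3])]
    return random_choice_csc(n_samples=10, classes=classes,
                             class_probability=probabilities,
                             random_state=0)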
|
bsd-3-clause
|
tcrossland/time_series_prediction
|
ann/scenario.py
|
1
|
2422
|
import time
import matplotlib.pyplot as plt
import numpy as np
class Config:
def __init__(self, time_series, look_back=6, batch_size=1, topology=None, validation_split=0.3,
include_index=False, activation='tanh', optimizer='adam'):
self.time_series = time_series
self.look_back = look_back
self.batch_size = batch_size
if topology is None:
topology = [5]
self.topology = topology
self.validation_split = validation_split
self.activation = activation
self.include_index = include_index
self.optimizer = optimizer
def __str__(self):
return "w{}-b{}".format(self.look_back, self.batch_size)
class Scenario:
def __init__(self, model, config):
self.model = model
self.time_series = config.time_series
self.dataset = config.time_series.dataset
self.epochs = 0
self.config = config
def execute(self, epochs):
print()
print()
self.epochs = self.epochs + epochs
print(">>>> {} + {} (epochs={}, topology={})".format(self.model, self.time_series, self.epochs,
self.config.topology))
self.model.summary()
start = time.clock()
self.model.train(epochs=epochs, batch_size=self.config.batch_size)
self.training_time = time.clock() - start
print("Training time: %.3f" % self.training_time)
prediction = self.model.evaluate()
# self.plot(predictions)
return prediction
def create_empty_plot(self):
plot = np.empty_like(self.dataset)
plot[:, :] = np.nan
return plot
def create_left_plot(self, data):
offset = self.config.look_back
plot = self.create_empty_plot()
plot[offset:len(data) + offset, :] = data
return plot
def create_right_plot(self, data):
plot = self.create_empty_plot()
plot[-len(data):, :] = data
return plot
def plot(self, predictions):
plt.figure(figsize=(16, 12))
plt.xlim(self.dataset.size * 0.6, self.dataset.size * 0.8)
plt.plot(self.dataset)
for pred in predictions:
plt.plot(self.create_right_plot(pred))
filename = "out/{}/{}-{}.png".format(self.time_series, self.model, self.config)
plt.savefig(filename)
plt.close()
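# Usage sketch (illustrative, not part of the original module): ``Scenario``
# expects a model object exposing ``summary()``, ``train(epochs, batch_size)``
# and ``evaluate()``, plus a time series object carrying a ``dataset`` array.
# The names below are hypothetical placeholders:
#
#     config = Config(my_time_series, look_back=6, batch_size=1, topology=[5])
#     scenario = Scenario(my_model, config)
#     prediction = scenario.execute(epochs=100)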
|
mit
|
fbagirov/scikit-learn
|
examples/svm/plot_separating_hyperplane_unbalanced.py
|
329
|
1850
|
"""
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
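# Commented sketch (illustrative, not part of the original example): as the note
# in the module docstring suggests, the same figure can be produced with a
# linear SGD classifier, assuming a scikit-learn version in which
# ``SGDClassifier`` still accepts ``n_iter``:
#
#     from sklearn.linear_model import SGDClassifier
#     clf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01)
#     wclf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01,
#                          class_weight={1: 10})
#
# The rest of the script (fitting, coefficient-based hyperplanes, plotting)
# stays unchanged.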
|
bsd-3-clause
|
huzq/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
11
|
14652
|
import pytest
import numpy as np
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import (RandomForestClassifier,
HistGradientBoostingClassifier)
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.base import BaseEstimator
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
class NaNTag(BaseEstimator):
def _more_tags(self):
return {'allow_nan': True}
class NoNaNTag(BaseEstimator):
def _more_tags(self):
return {'allow_nan': False}
class NaNTagRandomForest(RandomForestClassifier):
def _more_tags(self):
return {'allow_nan': True}
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True,
random_state=None, tol=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
with pytest.raises(ValueError):
model.transform(data)
def test_input_estimator_unchanged():
# Test that SelectFromModel fits on a clone of the estimator.
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert transformer.estimator is est
@pytest.mark.parametrize(
"max_features, err_type, err_msg",
[(-1, ValueError, "'max_features' should be 0 and"),
(data.shape[1] + 1, ValueError, "'max_features' should be 0 and"),
('gobbledigook', TypeError, "should be an integer"),
('all', TypeError, "should be an integer")]
)
def test_max_features_error(max_features, err_type, err_msg):
clf = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=clf,
max_features=max_features,
threshold=-np.inf)
with pytest.raises(err_type, match=err_msg):
transformer.fit(data, y)
@pytest.mark.parametrize("max_features", [0, 2, data.shape[1]])
def test_max_features_dim(max_features):
clf = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=clf,
max_features=max_features,
threshold=-np.inf)
X_trans = transformer.fit_transform(data, y)
assert X_trans.shape[1] == max_features
class FixedImportanceEstimator(BaseEstimator):
def __init__(self, importances):
self.importances = importances
def fit(self, X, y=None):
self.feature_importances_ = np.array(self.importances)
def test_max_features():
# Test max_features parameter using various values
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
max_features = X.shape[1]
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer1 = SelectFromModel(estimator=est,
threshold=-np.inf)
transformer2 = SelectFromModel(estimator=est,
max_features=max_features,
threshold=-np.inf)
X_new1 = transformer1.fit_transform(X, y)
X_new2 = transformer2.fit_transform(X, y)
assert_allclose(X_new1, X_new2)
# Test max_features against actual model.
transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025,
random_state=42))
X_new1 = transformer1.fit_transform(X, y)
scores1 = np.abs(transformer1.estimator_.coef_)
candidate_indices1 = np.argsort(-scores1, kind='mergesort')
for n_features in range(1, X_new1.shape[1] + 1):
transformer2 = SelectFromModel(estimator=Lasso(alpha=0.025,
random_state=42),
max_features=n_features,
threshold=-np.inf)
X_new2 = transformer2.fit_transform(X, y)
scores2 = np.abs(transformer2.estimator_.coef_)
candidate_indices2 = np.argsort(-scores2, kind='mergesort')
assert_allclose(X[:, candidate_indices1[:n_features]],
X[:, candidate_indices2[:n_features]])
assert_allclose(transformer1.estimator_.coef_,
transformer2.estimator_.coef_)
def test_max_features_tiebreak():
# Test if max_features can break tie among feature importance
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
max_features = X.shape[1]
feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1])
for n_features in range(1, max_features + 1):
transformer = SelectFromModel(
FixedImportanceEstimator(feature_importances),
max_features=n_features,
threshold=-np.inf)
X_new = transformer.fit_transform(X, y)
selected_feature_indices = np.where(transformer._get_support_mask())[0]
assert_array_equal(selected_feature_indices, np.arange(n_features))
assert X_new.shape[1] == n_features
def test_threshold_and_max_features():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer1 = SelectFromModel(estimator=est, max_features=3,
threshold=-np.inf)
X_new1 = transformer1.fit_transform(X, y)
transformer2 = SelectFromModel(estimator=est, threshold=0.04)
X_new2 = transformer2.fit_transform(X, y)
transformer3 = SelectFromModel(estimator=est, max_features=3,
threshold=0.04)
X_new3 = transformer3.fit_transform(X, y)
assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1])
selected_indices = transformer3.transform(
np.arange(X.shape[1])[np.newaxis, :])
assert_allclose(X_new3, X[:, selected_indices[0]])
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert hasattr(transformer.estimator_, 'feature_importances_')
X_new = transformer.transform(X)
assert X_new.shape[1] < X.shape[1]
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_sample_weight():
# Ensure sample weights are passed to underlying estimator
X, y = datasets.make_classification(
n_samples=100, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = LogisticRegression(random_state=0, fit_intercept=False)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=None)
mask = transformer._get_support_mask()
transformer.fit(X, y, sample_weight=sample_weight)
weighted_mask = transformer._get_support_mask()
assert not np.all(weighted_mask == mask)
transformer.fit(X, y, sample_weight=3 * sample_weight)
reweighted_mask = transformer._get_support_mask()
assert np.all(weighted_mask == reweighted_mask)
def test_coef_default_threshold():
X, y = datasets.make_classification(
n_samples=100, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1,
random_state=42))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_almost_equal(X_new, X[:, mask])
@skip_if_32bit
def test_2d_coef():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0, n_classes=4)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel a multi-class problem
transformer = SelectFromModel(estimator=LogisticRegression(),
threshold=threshold,
norm_order=order)
transformer.fit(X, y)
assert hasattr(transformer.estimator_, 'coef_')
X_new = transformer.transform(X)
assert X_new.shape[1] < X.shape[1]
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = np.linalg.norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False,
max_iter=5, tol=None)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert old_model is new_model
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_almost_equal(X_transform, transformer.transform(data))
# check that if est doesn't have partial_fit, neither does SelectFromModel
transformer = SelectFromModel(estimator=RandomForestClassifier())
assert not hasattr(transformer, "partial_fit")
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert transformer.estimator_.C == 100
def test_prefit():
# Test all possible combinations of the prefit parameter.
# Passing a prefit parameter with the selected model
# and fitting a unfit model with prefit=False should give same results.
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True,
random_state=0, tol=None)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_almost_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_almost_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
with pytest.raises(ValueError):
model.fit(data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_almost_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
# Test that the threshold can be set without refitting the model.
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True,
random_state=0, tol=None)
model = SelectFromModel(clf, threshold="0.1 * mean")
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = "1.0 * mean"
assert X_transform.shape[1] > model.transform(data).shape[1]
def test_fit_accepts_nan_inf():
# Test that fit doesn't check for np.inf and np.nan values.
clf = HistGradientBoostingClassifier(random_state=0)
model = SelectFromModel(estimator=clf)
nan_data = data.copy()
nan_data[0] = np.NaN
nan_data[1] = np.Inf
model.fit(data, y)
def test_transform_accepts_nan_inf():
# Test that transform doesn't check for np.inf and np.nan values.
clf = NaNTagRandomForest(n_estimators=100, random_state=0)
nan_data = data.copy()
model = SelectFromModel(estimator=clf)
model.fit(nan_data, y)
nan_data[0] = np.NaN
nan_data[1] = np.Inf
model.transform(nan_data)
def test_allow_nan_tag_comes_from_estimator():
allow_nan_est = NaNTag()
model = SelectFromModel(estimator=allow_nan_est)
assert model._get_tags()['allow_nan'] is True
no_nan_est = NoNaNTag()
model = SelectFromModel(estimator=no_nan_est)
assert model._get_tags()['allow_nan'] is False
def _pca_importances(pca_estimator):
return np.abs(pca_estimator.explained_variance_)
@pytest.mark.parametrize(
"estimator, importance_getter",
[(make_pipeline(PCA(random_state=0), LogisticRegression()),
'named_steps.logisticregression.coef_'),
(PCA(random_state=0), _pca_importances)]
)
def test_importance_getter(estimator, importance_getter):
selector = SelectFromModel(
estimator, threshold="mean", importance_getter=importance_getter
)
selector.fit(data, y)
assert selector.transform(data).shape[1] == 1
|
bsd-3-clause
|
bsipocz/seaborn
|
seaborn/algorithms.py
|
35
|
6889
|
"""Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
density estimate); only works for one-dimensional inputs and cannot
be used when `units` is present.
func : callable, default np.mean
Function to call on the args that are passed in.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure the input arrays all have the same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Do the bootstrap
if smooth:
return _smooth_bootstrap(args, n_boot, func, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, func,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
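def _bootstrap_usage_sketch():
    """Usage sketch (illustrative, not part of seaborn): bootstrap a mean.

    Returns a simple 95% percentile interval for the mean of a toy sample;
    assumes only numpy and the ``bootstrap`` function defined above.
    """
    rng = np.random.RandomState(0)
    x = rng.normal(loc=2.0, scale=1.0, size=200)
    boot_means = bootstrap(x, func=np.mean, n_boot=500, random_seed=0)
    return np.percentile(boot_means, [2.5, 97.5])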
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def randomize_corrmat(a, tail="both", corrected=True, n_iter=1000,
random_seed=None, return_dist=False):
"""Test the significance of set of correlations with permutations.
By default this corrects for multiple comparisons across one side
of the matrix.
Parameters
----------
a : n_vars x n_obs array
array with variables as rows
tail : both | upper | lower
whether test should be two-tailed, or which tail to integrate over
corrected : boolean
if True reports p values with respect to the max stat distribution
n_iter : int
number of permutation iterations
random_seed : int or None
seed for RNG
return_dist : bool
if True, also return the n_vars x n_vars x n_iter null distribution
Returns
-------
p_mat : n_vars x n_vars array
array of probabilities for the observed correlations under the null CDF
"""
if tail not in ["upper", "lower", "both"]:
raise ValueError("'tail' must be 'upper', 'lower', or 'both'")
rs = np.random.RandomState(random_seed)
a = np.asarray(a, np.float)
flat_a = a.ravel()
n_vars, n_obs = a.shape
# Do the permutations to establish a null distribution
null_dist = np.empty((n_vars, n_vars, n_iter))
for i_i in range(n_iter):
perm_i = np.concatenate([rs.permutation(n_obs) + (v * n_obs)
for v in range(n_vars)])
a_i = flat_a[perm_i].reshape(n_vars, n_obs)
null_dist[..., i_i] = np.corrcoef(a_i)
# Get the observed correlation values
real_corr = np.corrcoef(a)
# Figure out p values based on the permutation distribution
p_mat = np.zeros((n_vars, n_vars))
upper_tri = np.triu_indices(n_vars, 1)
if corrected:
if tail == "both":
max_dist = np.abs(null_dist[upper_tri]).max(axis=0)
elif tail == "lower":
max_dist = null_dist[upper_tri].min(axis=0)
elif tail == "upper":
max_dist = null_dist[upper_tri].max(axis=0)
cdf = lambda x: stats.percentileofscore(max_dist, x) / 100.
for i, j in zip(*upper_tri):
observed = real_corr[i, j]
if tail == "both":
p_ij = 1 - cdf(abs(observed))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
else:
for i, j in zip(*upper_tri):
null_corrs = null_dist[i, j]
cdf = lambda x: stats.percentileofscore(null_corrs, x) / 100.
observed = real_corr[i, j]
if tail == "both":
p_ij = 2 * (1 - cdf(abs(observed)))
elif tail == "lower":
p_ij = cdf(observed)
elif tail == "upper":
p_ij = 1 - cdf(observed)
p_mat[i, j] = p_ij
# Make p matrix symmetrical with nans on the diagonal
p_mat += p_mat.T
p_mat[np.diag_indices(n_vars)] = np.nan
if return_dist:
return p_mat, null_dist
return p_mat
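def _randomize_corrmat_usage_sketch():
    """Usage sketch (illustrative, not part of seaborn): permutation p values.

    Builds a small variables-by-observations array with one induced
    correlation and returns the corrected two-tailed p value matrix.
    """
    rng = np.random.RandomState(0)
    a = rng.randn(4, 50)          # 4 variables, 50 observations
    a[1] += a[0]                  # make variables 0 and 1 correlated
    return randomize_corrmat(a, tail="both", corrected=True,
                             n_iter=200, random_seed=0)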
|
bsd-3-clause
|
barbot/Cosmos
|
src/syn/syn_tavi.py
|
1
|
15978
|
#!env python
import monsoon
import sys
import threading
import time
import socket
import sys
import tty, termios
import struct
import binascii
import random
import ctypes
import numpy as np
import os
import GPy
import math
from scipy.special import erf
import matplotlib
from IPython.display import display
from matplotlib import pyplot as plt
from fparams_tavi import isParamValuationFeasible
exitFlag = 1
stDataColl = 0
collectedSamples = []
useVM = 0
logTime = 10 # in Seconds
constfile = "const.m"
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
N_INITIAL_SAMPLES = 2
N_OPT_STEPS = 1000
resultfile = "result.m"
tmpconstfile = "tmpconst.m"
XMAX = 400.0
YMAX = 20000.0
def get_my_string(fp):
f = open(fp, 'r')
string = str(f.read())
f.close()
return string
def GetReward():
cmd="Cosmos"
cmd += " HeartModelAll.slx"
cmd += " prop.lha"
#cmd += format(" --const \"SA_d=%f\""%x)
cmd += " --max-run 10 --batch 0 -v 0 --njob 2"
print(cmd+"\n")
os.system(cmd)
os.system("grep -A 1 \"Total:\" Result.res | grep \"Estimated value\" | sed \"s/Estimated value:\t//g\" > tmpResult")
v = eval(get_my_string("tmpResult"))
return v
if len(sys.argv) > 1:
useVM = 1
def SetArduinoParameter(handle, parID, parValue):
headerID = 0xF5
print "Sending Arduino parameter ID: "+str(parID)+" value: "+str(parValue)
if SetParameter(handle, headerID, parID, parValue)!=1:
return 0
return 1
def SetClientParameter(handle, parID, parValue):
headerID = 0xF4
print "Sending Client parameter ID: "+str(parID)+" value: "+str(parValue)
if SetParameter(handle, headerID, parID, parValue)!=1:
return 0
return 1
def SetParameter(handle, headerID, parID, parValue):
parStr = struct.pack('BBI',headerID, parID, parValue)
# Send parameter
handle.sendall(parStr)
buftmp = bytearray(1)
buflen = handle.recv_into(buftmp,1)
if buftmp[0]!=0xF6:
print "Wrong return value"
return 0
return 1
def Save2DArray(handle, arr):
for idx in range(len(arr)):
handle.write(str(arr[idx][0])+" "+str(arr[idx][1])+"\n")
def SaveMarkersToFile(handle, dta):
if dta==0:
handle.write(str(0)+" "+str(0))
elif dta==1:
handle.write(str(1)+" "+str(0))
elif dta==2:
handle.write(str(0)+" "+str(1))
elif dta==3:
handle.write(str(1)+" "+str(1))
def SaveToFile(handle, dta):
SaveMarkersToFile(handle, dta)
handle.write("\n")
def SaveToFileAll(handle, dta, current):
SaveMarkersToFile(handle, dta)
handle.write(" "+str(current))
handle.write("\n")
def GetMinCurrent(pmData):
currList = [it[0] for it in pmData]
return min(currList)
def GetSumCurrent(pmData, monitorSamplingFreq):
sumCurrent = 0
prev = pmData[0][1]
it_beg = 0
for it in range(1,len(pmData)):
if pmData[it][1]!=2 and prev==2:
it_beg = it
break
cnt = 0
for it in range(it_beg,len(pmData)):
if pmData[it][1]==2 and cnt>=20:
break
elif pmData[it][1]==2 and cnt<20:
cnt = cnt + 1
elif prev==2 and pmData[it][1]!=2:
cnt = 0
sumCurrent = sumCurrent + pmData[it][0]
prev = pmData[it][1]
return sumCurrent
def GetEnergyReadings(pmData, monitorSamplingFreq, tranListTimes, minCurrent):
cummCurrentList = []
sumCurrent = 0
sampleCnt = 1
prev = pmData[0][1]
timeList = [item[1] for item in tranListTimes]
cummCurrentList = [[item[0],item[1],0] for item in tranListTimes]
it_beg = 0
for it in range(1,len(pmData)):
if pmData[it][1]!=2 and prev==2:
it_beg = it
break
tmpSampleCnt = 0
timeListIdx = 0
for it in range(it_beg,len(pmData)):
stime = sampleCnt/monitorSamplingFreq
sumCurrent = sumCurrent + pmData[it][0]
tmpSampleCnt = tmpSampleCnt + 1
if timeListIdx>=len(timeList):
break
if stime>=timeList[timeListIdx]:
cummCurrentList[timeListIdx][2] = sumCurrent - tmpSampleCnt * minCurrent
timeListIdx = timeListIdx + 1
sumCurrent = 0
tmpSampleCnt = 0
sampleCnt = sampleCnt + 1
filedta = open('markers.txt', 'w+')
for it in range(it_beg,len(pmData)):
SaveToFileAll(filedta, pmData[it][1], pmData[it][0])
filedta.close()
return cummCurrentList
def GetTotalCurrents(handle, esamples, monitorSamplingFreq):
cummCurrentList = []
# Send get list of IDs
handle.sendall('\xF3')
bufSize = bytearray(4)
buflen = handle.recv_into(bufSize, 4)
if buflen!=4:
print "Wrong packet length"
return cummCurrentList
bufSize = struct.unpack("I",bufSize)
print "Packet size: "+str(bufSize[0])
bufIDs = bytearray()
buftmp = bytearray(2048)
bytes_recd = 0
while bytes_recd < bufSize[0]:
chunkLen = handle.recv_into(buftmp,2048)
bufIDs[bytes_recd:] = buftmp[:chunkLen]
bytes_recd = bytes_recd + chunkLen
if bytes_recd != bufSize[0]:
print "Wrong data"
return cummCurrentList
nTranCnt = bufSize[0]/8
print "Number of transitions received: "+str(nTranCnt)
tTimeList = []
for idx in range(0,nTranCnt):
tID = bufIDs[idx*8]
tTime = struct.unpack("I",bufIDs[idx*8+1:idx*8+1+4])
tTimeList.append([tID, tTime[0]])
#print str(tTime[0])+" "+str(bufIDs[idx*8])
toRemove = []
for idx in range(len(tTimeList)-1):
if tTimeList[idx][1]==tTimeList[idx+1][1]:
toRemove.append(tTimeList[idx])
for rem in toRemove:
tTimeList.remove(rem)
minCurrent = GetMinCurrent(esamples)
cummCurrentList = GetEnergyReadings(esamples, monitorSamplingFreq, tTimeList, minCurrent)
return cummCurrentList
def SaveDistribution(handle, dData):
dIdCurr = {}
for key, val in dData:
dIdCurr.setdefault(key, []).append(val)
keyRange = range(53, 59)
for key in keyRange:
if key not in dIdCurr:
handle.write("T"+str(key)+"_min"+" = "+str(0)+"\n")
handle.write("T"+str(key)+"_max"+" = "+str(0)+"\n")
handle.write("T"+str(key)+"_mean"+" = "+str(0)+"\n")
handle.write("T"+str(key)+"_var"+" = "+str(0)+"\n")
handle.write("T"+str(key)+"_list"+" = "+str([0])+"\n")
else:
handle.write("T"+str(key)+"_min"+" = "+str(min(dIdCurr[key]))+"\n")
handle.write("T"+str(key)+"_max"+" = "+str(max(dIdCurr[key]))+"\n")
handle.write("T"+str(key)+"_mean"+" = "+str(np.mean(dIdCurr[key]))+"\n")
handle.write("T"+str(key)+"_var"+" = "+str(np.var(dIdCurr[key]))+"\n")
handle.write("T"+str(key)+"_list"+" = "+str(dIdCurr[key])+"\n")
def normpdf(x):
return np.exp(-x*x/2)/np.sqrt(2*math.pi)
def normcdf(x):
return (1+ erf(x/np.sqrt(2)))/2
def findMax(m, X2, minv, maxv, fmin):
mu,s2 = m.predict(X2)
t = np.argmax((fmin-mu) * normcdf( (fmin-mu)/np.sqrt(s2) ) + np.sqrt(s2)*normpdf( (fmin-mu)/np.sqrt(s2) ))
#t2 = minv + (maxv - minv)*t/len(X2)
return X2[t]
class PowerMonitorThread (threading.Thread):
def __init__(self, monitor):
threading.Thread.__init__(self)
self.monitor = monitor
def run(self):
print "Starting PowerMonitor Thread\n"
self.monitor.StartDataCollection()
while exitFlag:
samples = self.monitor.CollectData()
if stDataColl:
collectedSamples.extend(samples)
#print "\n".join(map(str, samples))
self.monitor.StopDataCollection()
print "Ending PowerMonitor Thread\n"
if useVM==0:
mon = monsoon.Monsoon("/dev/tty.usbmodemfa131")
mon.SetVoltage(4.55)
mon.SetUsbPassthrough(0)
monItems = mon.GetStatus()
items = sorted(monItems.items())
print "\n".join(["%s: %s" % item for item in items])
mon.StopDataCollection()
# Create new threads
threadMonitor = PowerMonitorThread(mon)
# Start new Threads
threadMonitor.start()
############################
#X = np.random.uniform(-3000.,3000.,(20,1))
#Y = np.sin(X/1000) + np.random.randn(20,1)*0.05
#kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
#m = GPy.models.GPRegression(X,Y,kernel)
#m.optimize_restarts(num_restarts = 10)
#m.plot()
#display(m)
#plt.savefig(format('gaussfig0'))
############################
HOST = 'localhost' # The remote host
PORT = 27778 # The same port as used by the server
time.sleep(4);
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
s = None
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.connect(sa)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
sys.exit(1)
#s.setblocking(0)
idCurr = []
# 0 = ID, 1 = min, 2 = max, 3 = set value
parVals = [[0, 1000, 2000, 1000], [1, 20000, 10400], [2, 100, 30000, 3000], [0, 0, 2000, 500], [1, 10, 500, 131], [2, 200, 1200, 1000]]
parDict = {'SA_d':0, 'SA_ectopD':1, 'VRG_d':2, 'TURI':3, 'TAVI':4, 'TLRI':5}
XPace = np.array([0])
YPace = np.array([0])
XYPace = np.array([0])
#parNameC = "SA_d"
#parNameA = "TURI"
#parNameF = ["TLRI", "TAVI"]
parNameC = "SA_d"
parNameA = "TAVI"
parNameF = ["TLRI", "TAVI", "TURI"]
if useVM==0:
print "Preparing the PowerMonitor device..."
time.sleep(4);
print "Get initial values"
logTime = 60
for iters in range(0, N_INITIAL_SAMPLES):
# Generate random parameter for pacemaker
parValueArduino = random.randint(parVals[parDict[parNameA]][1], parVals[parDict[parNameA]][2])
# For testing
#parValueArduino = 150
tmpParArduino = parVals[parDict[parNameA]][3]
parVals[parDict[parNameA]][3] = parValueArduino
parll = [parVals[parDict[parNameF[0]]][3]-parVals[parDict[parNameF[1]]][3], parVals[parDict[parNameF[2]]][3]]
if isParamValuationFeasible(parll)!=1:
print "Pacemaker parameter not feasible: "+str(parll)
parVals[parDict[parNameA]][3] = tmpParArduino
continue
if SetArduinoParameter(s, parVals[parDict[parNameA]][0], parVals[parDict[parNameA]][3])!=1:
break
# Generate random parameter for Client
parVals[parDict[parNameC]][3] = random.randint(parVals[parDict[parNameC]][1], parVals[parDict[parNameC]][2])
# For testing purposes
#parValueClient = 2000
if SetClientParameter(s, parVals[parDict[parNameC]][0], parVals[parDict[parNameC]][3])!=1:
break;
# Save energy readings
fileconst = open(constfile, 'w+')
fileconst.write(parNameA+" = "+str(parVals[parDict[parNameA]][3])+"\n")
fileconst.write(parNameC+" = "+str(parVals[parDict[parNameC]][3])+"\n")
#isParamValuationFeasible(param)
print "Start iteration: "+str(iters)
# Start iteration
s.sendall('\xF0')
stDataColl = 1
time.sleep(logTime);
# Stop iteration
s.sendall('\xF1')
time.sleep(1);
stDataColl = 0
print "Stopped collecting data"
cummCurrentList = GetTotalCurrents(s, collectedSamples, monItems['sampleRate'])
if len(cummCurrentList)==0:
break
# Save energy readings to list
for item in cummCurrentList:
idCurr.append([item[0],item[2]])
SaveDistribution(fileconst, idCurr)
fileconst.close()
#energyValue = GetReward()
energyValue = GetSumCurrent(collectedSamples, monItems['sampleRate'])
XPace = np.vstack((XPace,parVals[parDict[parNameA]][3]))
XYPace = np.vstack((XYPace,parVals[parDict[parNameC]][3]))
YPace = np.vstack((YPace,energyValue))
print XPace
print YPace
os.system("rm "+constfile)
collectedSamples = []
print "Initial sample:"
XPace = XPace[1:len(XPace)]
YPace = YPace[1:len(YPace)]
XYPace = XYPace[1:len(XYPace)]
XYPace = np.hstack((XYPace,YPace))
# For testing purposes
tmpfileconst = open(tmpconstfile, 'w+')
Save2DArray(tmpfileconst, XYPace)
tmpfileconst.close()
print "Optimize parameters"
logTime = 60
# Store the initial values of parameters
rfhandle = open(resultfile, 'w+')
rfhandle.close()
rfhandle = open(resultfile, 'a')
rfhandle.write("paramp = ["+"\n")
for idx in range(len(XPace)):
rfhandle.write(str(XPace[idx])+" "+str(YPace[idx])+";\n")
rfhandle.close()
for iters in range(0, N_OPT_STEPS):
m = GPy.models.GPRegression(XPace/float(XMAX),YPace/float(YMAX),kernel)
m.optimize_restarts(num_restarts = 20)
Xin = np.linspace(parVals[parDict[parNameA]][1], parVals[parDict[parNameA]][2],num=1000).reshape((1000,1))
Xf = np.array([0])
for idx in range(len(Xin)):
#parll = [parVals[parDict[parNameF[0]]][3]-parVals[parDict[parNameF[1]]][3], Xin[idx]]
parll = [parVals[parDict[parNameF[0]]][3]-Xin[idx], parVals[parDict[parNameF[2]]][3]]
if isParamValuationFeasible(parll)==1:
Xf = np.vstack((Xf,Xin[idx]))
Xf = Xf[1:len(Xf)]
parValueArduino = findMax(m, Xf/XMAX, parVals[parDict[parNameA]][1]/XMAX, parVals[parDict[parNameA]][2]/XMAX, min(YPace/YMAX))
tmpParArduino = parVals[parDict[parNameA]][3]
parVals[parDict[parNameA]][3] = int(parValueArduino*XMAX)
parll = [parVals[parDict[parNameF[0]]][3]-parVals[parDict[parNameF[1]]][3], parVals[parDict[parNameF[2]]][3]]
if isParamValuationFeasible(parll)!=1:
print "Pacemaker parameter not feasible: "+str(parll)
parVals[parDict[parNameA]][3] = tmpParArduino
continue
if SetArduinoParameter(s, parVals[parDict[parNameA]][0], parVals[parDict[parNameA]][3])!=1:
break
# Generate random parameter for Client
parVals[parDict[parNameC]][3] = random.randint(parVals[parDict[parNameC]][1], parVals[parDict[parNameC]][2])
# For testing purposes
#parValueClient = 2000
if SetClientParameter(s, parVals[parDict[parNameC]][0], parVals[parDict[parNameC]][3])!=1:
break;
# Save energy readings
fileconst = open(constfile, 'w+')
fileconst.write(parNameA+" = "+str(parVals[parDict[parNameA]][3])+"\n")
fileconst.write(parNameC+" = "+str(parVals[parDict[parNameC]][3])+"\n")
#isParamValuationFeasible(param)
print "Start iteration: "+str(iters)
# Start iteration
s.sendall('\xF0')
stDataColl = 1
time.sleep(logTime);
# Stop iteration
s.sendall('\xF1')
time.sleep(1);
stDataColl = 0
print "Stopped collecting data"
cummCurrentList = GetTotalCurrents(s, collectedSamples, monItems['sampleRate'])
if len(cummCurrentList)==0:
break
# Save energy readings to list
for item in cummCurrentList:
idCurr.append([item[0],item[2]])
SaveDistribution(fileconst, idCurr)
fileconst.close()
#energyValue = GetReward()
energyValue = GetSumCurrent(collectedSamples, monItems['sampleRate'])
XPace = np.vstack((XPace,parVals[parDict[parNameA]][3]))
YPace = np.vstack((YPace,energyValue))
os.system("rm "+constfile)
collectedSamples = []
m.plot()
plt.plot(XPace,YPace,'bo')
plt.xlabel('$\mathrm{TAVI}$')
plt.ylabel('$\mathrm{Energy}$')
display(m)
display(plt)
plt.savefig(format('gaussfig%i'%iters))
# Save to file
rfhandle = open(resultfile, 'a')
rfhandle.write(str(parValueArduino)+" "+str(energyValue)+";\n")
rfhandle.close()
s.sendall('\xF2')
exitFlag = 0
# Save to file
rfhandle = open(resultfile, 'a')
rfhandle.write("];"+"\n")
rfhandle.close()
else:
key = ''
# Generate random parameter
headerID = 0xF4
parID = 1
#parValue = random.randint(1, 2000)
parValue = 2000
print "Sending parameter ID: "+str(parID)+" value: "+str(parValue)
parStr = struct.pack('BBI',headerID, parID, parValue)
# Send parameter to Client
s.sendall(parStr)
buftmp = bytearray(1)
buflen = s.recv_into(buftmp,1)
if buftmp[0]!=0xF6:
print "Wrong return value"
s.close()
exit()
while key!='a':
print "Press a key: "
key = sys.stdin.read(1)
if key=='w':
s.sendall('\xF0')
stDataColl = 1
elif key=='s':
s.sendall('\xF1')
time.sleep(1);
stDataColl = 0
print "Stopped collecting data"
# Send get list of IDs
s.sendall('\xF3')
bufSize = bytearray(4)
buflen = s.recv_into(bufSize, 4)
if buflen!=4:
print "Wrong packet length"
break
bufSize = struct.unpack("I",bufSize)
print "Packet size: "+str(bufSize[0])
bufIDs = bytearray()
buftmp = bytearray(2048)
bytes_recd = 0
while bytes_recd < bufSize[0]:
chunkLen = s.recv_into(buftmp,2048)
bufIDs[bytes_recd:] = buftmp[:chunkLen]
bytes_recd = bytes_recd + chunkLen
if bytes_recd != bufSize[0]:
print "Wrong data"
break
print binascii.hexlify(bufIDs)
collectedSamples = []
elif key=='a':
s.sendall('\xF2')
s.close()
|
gpl-2.0
|
datachand/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_link_functions_gammaGLM.py
|
5
|
2024
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_gamma():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).
open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,5]
sm_data_features = sm_data[:,[1,2,3,4,6,7,8,9]]
print("Testing for family: GAMMA")
print("Set variables for h2o.")
myY = "DPROS"
myX = ["ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","CAPSULE"]
print("Create models with canonical link: INVERSE")
h2o_model_in = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gamma", link="inverse",alpha=[0.5], Lambda=[0])
sm_model_in = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Gamma(sm.families.links.inverse_power)).fit()
print("Compare model deviances for link function inverse")
h2o_deviance_in = h2o_model_in.residual_deviance() / h2o_model_in.null_deviance()
sm_deviance_in = sm_model_in.deviance / sm_model_in.null_deviance
assert h2o_deviance_in - sm_deviance_in < 0.01, "expected h2o to have an equivalent or better deviance measure"
print("Create models with canonical link: LOG")
h2o_model_log = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gamma", link="log",alpha=[0.5], Lambda=[0])
sm_model_log = sm.GLM(endog=sm_data_response, exog=sm_data_features,
family=sm.families.Gamma(sm.families.links.log)).fit()
print("Compare model deviances for link function log")
h2o_deviance_log = h2o_model_log.residual_deviance() / h2o_model_log.null_deviance()
sm_deviance_log = sm_model_log.deviance / sm_model_log.null_deviance
assert h2o_deviance_log - sm_deviance_log < 0.01, "expected h2o to have an equivalent or better deviance measure"
if __name__ == "__main__":
tests.run_test(sys.argv, link_functions_gamma)
|
apache-2.0
|
apache/incubator-asterixdb
|
asterixdb/asterix-app/src/test/resources/TweetSent/sentiment.py
|
1
|
1161
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math,sys
import pickle;
import sklearn;
import os;
class TweetSent(object):
def __init__(self):
pickle_path = os.path.join(os.path.dirname(__file__), 'sentiment_pipeline3')
f = open(pickle_path,'rb')
self.pipeline = pickle.load(f)
f.close()
def sentiment(self, *args):
return self.pipeline.predict(args[0])[0].item()
|
apache-2.0
|
DonghoChoi/ISB_Project
|
local/CIS_application.py
|
2
|
4126
|
#!/usr/bin/python
# Author: Dongho Choi
'''
This script
(1) reads multiple tables from the database
(2) joins the tables into one for analysis
'''
import os.path
import datetime
import math
import time
import itertools
import pandas as pd
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
import sys
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
import statsmodels.api as sm
import csv
#sys.path.append("./configs/")
sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
if __name__ == "__main__":
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established")
# Average Coverage and UsefulCoverage across sessions, including those with zero Coverage
df_coverage_with_zero_field = pd.read_sql('SELECT userID,count(Coverage) as count_session_with_zero, avg(Coverage) as avg_cov_with_zero,avg(UsefulCoverage) as avg_usecov_with_zero FROM user_field_session_coverage WHERE Coverage >= 0 group by userID', con=connection)
print("Coverage_with_zero data imported.")
# Average Coverage and UsefulCoverage across sessions where Coverage is greater than zero
df_coverage_gt_zero_field = pd.read_sql('SELECT userID,count(Coverage) as count_session_gt_zero, avg(Coverage) as avg_cov_gt_zero,avg(UsefulCoverage) as avg_usecov_gt_zero FROM user_field_session_coverage WHERE Coverage > 0 group by userID', con=connection)
print("Coverage_gt_zero data imported.")
# Average Use_ratio for sessions where Coverage is greater than zero
df_useratio_field = pd.read_sql('SELECT userID,avg(Use_ratio) as avg_useratio FROM user_field_session_coverage WHERE Coverage > 0 group by userID', con=connection)
print("Useratio data imported.")
# Online diversity in field session
df_online_diversity_field = pd.read_sql('SELECT userID, online_diversity as online_diversity_field, online_loyalty as online_loyalty_field FROM user_online_diversity', con=connection)
print("Online diversity data imported.")
# Online performance in lab task 2
df_lab_performance = pd.read_sql('SELECT userID,Coverage as Cov_task2,UniqueCoverage as UniCov_task2,UsefulCoverage as UseCov_task2, UniqueUsefulCoverage as UniUseCov_task2 FROM individual_data', con=connection)
print("Individual data imported.")
# Geo-exploration: S_k measure
df_geo_s_k = pd.read_sql('SELECT userID,gyration_all,gyration_k,s_k FROM mobility_data', con=connection)
print("s_k measure imported")
# Geo-exploration diversity in field session
df_location_diversity = pd.read_sql('SELECT userID,location_diversity,location_loyalty FROM user_location_diversity', con=connection)
print("Location diversity imported")
#server.stop()
# Joining multiple dataframes
df_join = pd.merge(df_coverage_with_zero_field, df_coverage_gt_zero_field, on='userID', how='inner')
df_join = pd.merge(df_join, df_useratio_field, on='userID', how='inner')
df_join = pd.merge(df_join, df_online_diversity_field, on='userID', how='inner')
df_join = pd.merge(df_join, df_lab_performance, on='userID', how='inner')
df_join = pd.merge(df_join, df_geo_s_k, on='userID', how='inner')
df_join = pd.merge(df_join, df_location_diversity, on='userID', how='inner')
print(df_join)
print(df_join.corr(method='pearson'))
|
gpl-3.0
|
gpospelov/BornAgain
|
Examples/fit51_Basic/basic_fitting_tutorial.py
|
1
|
4069
|
"""
Fitting example: 4-parameter fit for a mixture of cylinders and prisms on top
of a substrate.
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
import numpy as np
from matplotlib import pyplot as plt
def get_sample(params):
"""
Returns a sample with uncorrelated cylinders and prisms on a substrate.
"""
cylinder_height = params["cylinder_height"]
cylinder_radius = params["cylinder_radius"]
prism_height = params["prism_height"]
prism_base_edge = params["prism_base_edge"]
# defining materials
m_vacuum = ba.HomogeneousMaterial("Vacuum", 0.0, 0.0)
m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
cylinder_ff = ba.FormFactorCylinder(cylinder_radius, cylinder_height)
cylinder = ba.Particle(m_particle, cylinder_ff)
prism_ff = ba.FormFactorPrism3(prism_base_edge, prism_height)
prism = ba.Particle(m_particle, prism_ff)
layout = ba.ParticleLayout()
layout.addParticle(cylinder, 0.5)
layout.addParticle(prism, 0.5)
# vacuum layer with particles and substrate form multi layer
vacuum_layer = ba.Layer(m_vacuum)
vacuum_layer.addLayout(layout)
substrate_layer = ba.Layer(m_substrate, 0)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(vacuum_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation(params):
"""
Returns a GISAXS simulation with beam and detector defined
"""
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(100, -1.0*deg, 1.0*deg, 100, 0.0*deg,
2.0*deg)
simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
simulation.beam().setIntensity(1e+08)
simulation.setSample(get_sample(params))
return simulation
def create_real_data():
"""
Generating "experimental" data by running simulation with certain parameters.
The data is saved on disk in the form of numpy array.
"""
# default sample parameters
params = {
'cylinder_height': 5.0*nm,
'cylinder_radius': 5.0*nm,
'prism_height': 5.0*nm,
'prism_base_edge': 5.0*nm
}
# retrieving simulated data in the form of numpy array
simulation = get_simulation(params)
simulation.runSimulation()
real_data = simulation.result().array()
# spoiling simulated data with noise to produce "real" data
np.random.seed(0)
noise_factor = 0.1
noisy = np.random.normal(real_data, noise_factor*np.sqrt(real_data))
noisy[noisy < 0.1] = 0.1
np.savetxt("basic_fitting_tutorial_data.txt.gz", real_data)
def load_real_data():
"""
Loads experimental data from file
"""
return np.loadtxt("basic_fitting_tutorial_data.txt.gz", dtype=float)
def run_fitting():
"""
Setup simulation and fit
"""
real_data = load_real_data()
fit_objective = ba.FitObjective()
fit_objective.addSimulationAndData(get_simulation, real_data)
# Print fit progress on every n-th iteration.
fit_objective.initPrint(10)
# Plot fit progress on every n-th iteration. Will slow down fit.
fit_objective.initPlot(10)
params = ba.Parameters()
params.add("cylinder_height", 4.*nm, min=0.01)
params.add("cylinder_radius", 6.*nm, min=0.01)
params.add("prism_height", 4.*nm, min=0.01)
params.add("prism_base_edge", 6.*nm, min=0.01)
minimizer = ba.Minimizer()
result = minimizer.minimize(fit_objective.evaluate, params)
fit_objective.finalize(result)
print("Fitting completed.")
print("chi2:", result.minValue())
for fitPar in result.parameters():
print(fitPar.name(), fitPar.value, fitPar.error)
# saving simulation image corresponding to the best fit parameters
# np.savetxt("data.txt", fit_objective.simulationResult().array())
if __name__ == '__main__':
# uncomment line below to regenerate "experimental" data file
# create_real_data()
run_fitting()
plt.show()
|
gpl-3.0
|
sourcepole/kadas-albireo
|
python/plugins/processing/algs/qgis/RasterLayerHistogram.py
|
5
|
3235
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BINS,
self.tr('Number of bins'), 2, None, 10))
self.addOutput(OutputHTML(self.PLOT, self.tr('Output plot')))
self.addOutput(OutputTable(self.TABLE, self.tr('Output table')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
nbins = self.getParameterValue(self.BINS)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer, progress)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
|
gpl-2.0
|
hoenirvili/distributions
|
distributions/bernoulli.py
|
1
|
2492
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import bernoulli
from .distribution import Distribution
__all__ = ['Bernoulli']
class Bernoulli(Distribution):
"""
A random variable X that has a Bernoulli distribution
represents success in a single
independent yes/no trial, which
yields success with probability p.
Parameters
----------
p : int or float
Probability of a trial to be successful
"""
def __init__(self, p):
if (type(p) != int and type(p) != float or
p > 1 or p < 0 or p is None):
raise ValueError("Invalid probability number")
self.__p = p
self.__r = 1
self.__all_r = np.arange(0, self.__r+1)
def mean(self):
"""
Compute the mean of the distribution
Returns:
--------
mean : float
"""
return bernoulli.mean(self.__p)
def variance(self):
"""
Compute the variance of the distribution
Returns:
--------
variance : float
"""
return bernoulli.var(self.__p)
def std(self):
"""
Compute the standard deviation of the distribution.
Returns:
--------
std : float
"""
return bernoulli.std(self.__p)
def pmf(self):
"""
Compute the probability mass function of the distribution
Returns:
--------
pmf : float
"""
return bernoulli.pmf(self.__r, self.__p)
def cdf(self):
"""
Compute the cumulative distribution function.
Returns:
--------
cdf : float
"""
return bernoulli.cdf(self.__r, self.__p)
def pmfs(self):
"""
Compute the probability mass function of the distribution the
success and failure in one trial p, 1-p
Returns:
--------
pmf : numpy.narray
"""
return bernoulli.pmf(self.__all_r, self.__p)
def plot(self):
"""Plot values pmfs values of the distribution in one trial"""
pmfs = self.pmfs()
fig, ax = plt.subplots()
x = np.arange(0, 2)
plt.bar(x, pmfs, color="blue")
ax.set_xticks(x)
ax.set_xticklabels(['Failure', 'Success'])
ax.set_title('Bernoulli distribution')
ax.set_ylabel('Probability of success')
ax.set_ylim([0, 1])
plt.show()
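# Usage sketch (illustrative, not part of the original module): summarize a
# Bernoulli trial with success probability 0.3.
def _bernoulli_usage_sketch():
    b = Bernoulli(0.3)
    return {
        "mean": b.mean(),          # 0.3
        "variance": b.variance(),  # p * (1 - p) = 0.21
        "pmf(X=1)": b.pmf(),       # 0.3
        "pmfs": b.pmfs(),          # array([0.7, 0.3])
    }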
|
mit
|
thomasaarholt/hyperspy
|
hyperspy/misc/math_tools.py
|
3
|
5483
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import math
import numbers
import numpy as np
import dask.array as da
from functools import reduce
def symmetrize(a):
return a + a.swapaxes(0, 1) - np.diag(a.diagonal())
def antisymmetrize(a):
return a - a.swapaxes(0, 1) + np.diag(a.diagonal())
def closest_nice_number(number):
oom = 10 ** math.floor(math.log10(number))
return oom * (number // oom)
def get_linear_interpolation(p1, p2, x):
"""Given two points in 2D returns y for a given x for y = ax + b
Parameters
----------
p1,p2 : (x, y)
x : float
Returns
-------
y : float
"""
x1, y1 = p1
x2, y2 = p2
a = (y2 - y1) / (x2 - x1)
b = (x2 * y1 - x1 * y2) / (x2 - x1)
y = a * x + b
return y
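# For example (illustrative, not part of HyperSpy):
# get_linear_interpolation((0, 0), (2, 4), 1) returns 2.0, since the line
# through the two points is y = 2 * x.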
def order_of_magnitude(number):
"""Order of magnitude of the given number
Parameters
----------
number : float
Returns
-------
Float
"""
return math.floor(math.log10(number))
def isfloat(number):
"""Check if a number or array is of float type.
This is necessary because e.g. isinstance(np.float32(2), float) is False.
"""
if hasattr(number, "dtype"):
return np.issubdtype(number, np.floating)
else:
return isinstance(number, float)
def anyfloatin(things):
"""Check if iterable contains any non integer."""
for n in things:
if isfloat(n) and not n.is_integer():
return True
return False
def outer_nd(*vec):
"""
Calculates outer product of n vectors
Parameters
----------
vec : vector
Return
------
out : ndarray
"""
return reduce(np.multiply.outer, vec)
def hann_window_nth_order(m, order):
"""
Calculates 1D Hann window of nth order
Parameters
----------
m : int
number of points in window (typically the length of a signal)
order : int
Filter order
Return
------
window : array
window
"""
if not isinstance(m, int) or m <= 0:
raise ValueError('Parameter m has to be positive integer greater than 0.')
if not isinstance(order, int) or order <= 0:
raise ValueError('Filter order has to be positive integer greater than 0.')
sin_arg = np.pi * (m - 1.) / m
cos_arg = 2. * np.pi / (m - 1.) * (np.arange(m))
return m / (order * 2 * np.pi) * sum([(-1) ** i / i *
np.sin(i * sin_arg) * (np.cos(i * cos_arg) - 1)
for i in range(1, order + 1)])
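# Usage sketch (illustrative, not part of HyperSpy): taper a 1D signal with a
# first-order Hann window before taking an FFT.
def _hann_window_usage_sketch(m=256):
    window = hann_window_nth_order(m, order=1)
    signal = np.random.rand(m)
    return np.fft.rfft(signal * window)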
def optimal_fft_size(target, real=False):
"""Wrapper around scipy function next_fast_len() for calculating optimal FFT padding.
scipy.fft was only added in 1.4.0, so we fall back to scipy.fftpack
if it is not available. The main difference is that next_fast_len()
does not take a second argument in the older implementation.
Parameters
----------
target : int
Length to start searching from. Must be a positive integer.
real : bool, optional
True if the FFT involves real input or output, only available
for scipy > 1.4.0
Returns
-------
int
Optimal FFT size.
"""
try:
from scipy.fft import next_fast_len
support_real = True
except ImportError: # pragma: no cover
from scipy.fftpack import next_fast_len
support_real = False
if support_real:
return next_fast_len(target, real)
else: # pragma: no cover
return next_fast_len(target)
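# Usage sketch (illustrative, not part of HyperSpy): pad a signal to an
# FFT-friendly length before transforming.
def _optimal_fft_size_usage_sketch(signal_length=1001):
    signal = np.random.rand(signal_length)
    n = optimal_fft_size(signal_length)      # smallest "fast" length >= input
    padded = np.zeros(n)
    padded[:signal.size] = signal
    return np.fft.rfft(padded)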
def check_random_state(seed, lazy=False):
"""Turn a random seed into a np.random.RandomState instance.
Parameters
----------
seed : None or int or np.random.RandomState or dask.array.random.RandomState
If None:
Return the RandomState singleton used by
np.random or dask.array.random
If int:
Return a new RandomState instance seeded with ``seed``.
If np.random.RandomState:
Return it.
If dask.array.random.RandomState:
Return it.
lazy : bool, default False
If True, and seed is ``None`` or ``int``, return
a dask.array.random.RandomState instance instead.
"""
# Derived from `sklearn.utils.check_random_state`.
# Copyright (c) 2007-2020 The scikit-learn developers.
# All rights reserved.
if seed is None or seed is np.random:
return da.random._state if lazy else np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return da.random.RandomState(seed) if lazy else np.random.RandomState(seed)
if isinstance(seed, (da.random.RandomState, np.random.RandomState)):
return seed
raise ValueError(f"{seed} cannot be used to seed a RandomState instance")
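# Usage sketch (illustrative, not part of HyperSpy): the same seed handling
# covers both eager NumPy and lazy dask random states.
def _check_random_state_usage_sketch(seed=42):
    rs_np = check_random_state(seed)               # np.random.RandomState
    rs_lazy = check_random_state(seed, lazy=True)  # dask RandomState
    eager_draw = rs_np.normal(size=3)
    lazy_draw = rs_lazy.normal(size=3).compute()
    return eager_draw, lazy_draw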
|
gpl-3.0
|
vibhutiM/Production-Failures
|
scripts/train.py
|
2
|
4101
|
# coding: utf-8
# ### Open using Jupyter Notebook. It holds the code and visualizations for developing the different classification algorithms (LibSVM, RBF SVM, Naive Bayes, Random Forest, Gradient Boosting) on the chosen subset of important features.
# In[27]:
import pandas as pd
import numpy as np
from numpy import sort
from sklearn.metrics import matthews_corrcoef, accuracy_score,confusion_matrix
from sklearn.feature_selection import SelectFromModel
from matplotlib import pyplot
import pylab as pl
from sklearn import svm
get_ipython().magic(u'matplotlib inline')
# In[4]:
SEED = 1234
## Selected set of most important features
featureSet=['L3_S31_F3846','L1_S24_F1578','L3_S33_F3857','L1_S24_F1406','L3_S29_F3348','L3_S33_F3863',
'L3_S29_F3427','L3_S37_F3950','L0_S9_F170', 'L3_S29_F3321','L1_S24_F1346','L3_S32_F3850',
'L3_S30_F3514','L1_S24_F1366','L2_S26_F3036']
train_x = pd.read_csv("../data/train_numeric.csv", usecols=featureSet)
train_y = pd.read_csv("../data/train_numeric.csv", usecols=['Response'])
# In[5]:
test_x = pd.read_csv("../data/test_numeric.csv", usecols=featureSet)
# In[6]:
train_x = train_x.fillna(9999999)
msk = np.random.rand(len(train_x)) < 0.7 # creating Training and validation set
X_train = train_x[msk]
Y_train = train_y.Response.ravel()[msk]
X_valid = train_x[~msk]
Y_valid = train_y.Response.ravel()[~msk]
# In[7]:
def showconfusionmatrix(cm, typeModel):
pl.matshow(cm)
pl.title('Confusion matrix for '+typeModel)
pl.colorbar()
pl.show()
# In[24]:
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
C=4
lin_svc = svm.LinearSVC(C=C).fit(X_train, Y_train)
print "LibSVM fitted"
title = 'LinearSVC (linear kernel)'
predicted = lin_svc.predict(X_valid)
mcc= matthews_corrcoef(Y_valid, predicted)
print "MCC Score \t +"+title+str(mcc)
cm = confusion_matrix(predicted, Y_valid)
showconfusionmatrix(cm, title)
print "Confusion Matrix"
print (cm)
# In[22]:
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
C=4
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X_train, Y_train)
print "RBF fitted"
title = 'SVC with RBF kernel'
predicted = rbf_svc.predict(X_valid)
mcc= matthews_corrcoef(Y_valid, predicted)
print "MCC Score \t +"+title+str(mcc)
cm = confusion_matrix(predicted, Y_valid)
showconfusionmatrix(cm, title)
print "Confusion Matrix"
print (cm)
# In[10]:
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
clf = gnb.fit(X_train,Y_train)
print "Naive Bayes Fitted"
title = 'Naive Bayes'
predicted = clf.predict(X_valid)
mcc= matthews_corrcoef(Y_valid, predicted)
print "MCC Score \t +"+title+str(mcc)
cm = confusion_matrix(predicted, Y_valid)
showconfusionmatrix(cm, title)
print "Confusion Matrix"
print (cm)
# In[21]:
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.model_selection import GridSearchCV
# In[23]:
rf = RandomForestClassifier(n_estimators=20, n_jobs=2)
param_grid = {
'n_estimators': [5, 10, 15, 20],
'max_depth': [2, 5, 7, 9]
}
# In[24]:
grid_rf = GridSearchCV(rf, param_grid, cv=10)
rf_model=grid_rf.fit(X_train, Y_train)
# In[30]:
print "RF fitted"
title = 'Random Forest'
predicted = rf_model.predict(X_valid)
mcc= matthews_corrcoef(Y_valid, predicted)
print "MCC Score \t +"+title+str(mcc)
cm = confusion_matrix(predicted, Y_valid)
showconfusionmatrix(cm, title)
# In[31]:
gb = GradientBoostingClassifier(learning_rate=0.5)
param_grid = {
'n_estimators': [5, 10, 15, 20],
'max_depth': [2, 5, 7, 9]
}
# In[32]:
grid_gb = GridSearchCV(gb, param_grid, cv=10)
gb_model=grid_gb.fit(X_train, Y_train)
# In[36]:
print "GB fitted"
title = 'Gradient Boosting'
predicted = gb_model.predict(X_valid)
mcc= matthews_corrcoef(Y_valid, predicted)
print "MCC Score \t +"+title+str(mcc)
cm = confusion_matrix(predicted, Y_valid)
showconfusionmatrix(cm, title)
|
apache-2.0
|
nicain/dipde_dev
|
dipde/examples/excitatory_inhibitory.py
|
2
|
2373
|
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from dipde.internals.internalpopulation import InternalPopulation
from dipde.internals.externalpopulation import ExternalPopulation
from dipde.internals.network import Network
from dipde.internals.connection import Connection as Connection
import scipy.stats as sps
def get_network(dv=.001, verbose=False, update_method='approx', approx_order=1, tol=1e-14):
# Create network:
b1 = ExternalPopulation('100')
i1 = InternalPopulation(v_min=-.02, v_max=.02, dv=dv, update_method=update_method, approx_order=approx_order, tol=tol)
b1_i1 = Connection(b1, i1, 1, weights=.005, delays=([.005, .01],[.5,.5]))
b1_i1_2 = Connection(b1, i1, 1, weights=-.005, delays=sps.uniform(0,.01))
network = Network([b1, i1], [b1_i1, b1_i1_2])
return network
def example(show=True, save=False):
# Settings:
t0 = 0.
dt = .0001
dv = .0001
tf = .1
verbose = True
update_method = 'approx'
approx_order = 1
tol = 1e-14
# Run simulation:
network = get_network(dv=dv, verbose=verbose, update_method=update_method, approx_order=approx_order, tol=tol)
network.run(dt=dt, tf=tf, t0=t0)
i1 = network.population_list[1]
if show == True:
# Visualize:
plt.figure(figsize=(3,3))
plt.plot(i1.t_record, i1.firing_rate_record)
plt.xlim([0,tf])
plt.ylim(ymin=0)
plt.xlabel('Time (s)')
plt.ylabel('Firing Rate (Hz)')
plt.tight_layout()
if save == True: plt.savefig('./excitatory_inhibitory.png')
plt.show()
return i1.t_record, i1.firing_rate_record
if __name__ == "__main__": example() # pragma: no cover
|
gpl-3.0
|
eg-zhang/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
jnez71/demos
|
signals/fourier_transform.py
|
1
|
1952
|
#!/usr/bin/env python3
"""
Using a typical FFT routine and showing the principle
behind the DTFT computation.
"""
import numpy as np
from matplotlib import pyplot
##################################################
# Efficient practical usage
def fft(values, dt):
freqs = np.fft.rfftfreq(len(values), dt)
coeffs = np.sqrt(2.0/len(values)) * np.fft.rfft(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2.0) # don't "double count" the DC alias
return (freqs, coeffs)
# Working principle
def dtft(values, dt):
times = dt * np.arange(len(values))
nyquist = 1.0/(2.0*dt)
df = nyquist / (len(values)/2.0)
freqs = np.arange(0.0, nyquist+df, df)
dtft_matrix = np.exp(-1j * (2.0*np.pi) * np.outer(freqs, times))  # 2*pi has units (rad/s)/Hz; outer() forms all f*t products
coeffs = np.sqrt(2.0/len(values)) * dtft_matrix.dot(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2.0) # don't "double count" the DC alias
return (freqs, coeffs)
##################################################
def function(time):
w = 20*np.pi
value = 0.0
for k in range(5):
value += (k+1)*np.cos((k*w)*time)
return value
dt = 0.001
times = np.arange(0.0, 0.2, dt)
values = function(times)
##################################################
fft_freqs, fft_coeffs = fft(values, dt)
dtft_freqs, dtft_coeffs = dtft(values, dt)
assert np.allclose(fft_freqs, dtft_freqs)
assert np.allclose(fft_coeffs, dtft_coeffs)
##################################################
# Demonstrate Parseval's theorem
print(np.linalg.norm(values))
print(np.linalg.norm(dtft_coeffs))
##################################################
fig = pyplot.figure()
ax = fig.add_subplot(2, 1, 1)
ax.plot(times, values)
ax.set_xlabel("Time (s)", fontsize=16)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.scatter(dtft_freqs, np.abs(dtft_coeffs))
ax.set_xlabel("Freq (Hz)", fontsize=16)
ax.grid(True)
pyplot.show()
|
mit
|
shahankhatch/scikit-learn
|
sklearn/tests/test_isotonic.py
|
230
|
11087
|
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
|
bsd-3-clause
|
NunoEdgarGub1/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/text.py
|
69
|
55366
|
"""
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
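# A quick illustration of the mapping (sketch, not an exhaustive spec):
# get_rotation('horizontal') == 0.0, get_rotation('vertical') == 90.0,
# and numeric input is wrapped into [0, 360), e.g. get_rotation(450) == 90.0.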
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
Calculate the bounding box of the text. Unlike the
:meth:`matplotlib.text.Text.get_extents` method, the bbox size of
the text is calculated before the rotation.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text(%g,%g,%s)"%(self._y,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position: offset the display bbox by the alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
If rectprops has "boxstyle" key. A FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPath is set to the fontsize.
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch has not been created.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBoxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._horizontalalignment = align
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multi-line layout. The layout of the
bounding box of all the lines is determined by the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be aligned to the left, right, or center.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Multi-line text alignment must be one of %s' % str(legal))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' % str(legal))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
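# Minimal usage sketch (illustrative only; the coordinates and text below are
# arbitrary). A Text artist is normally created for you by ``pyplot.text`` or
# ``Axes.text``, but it can also be constructed directly and added to an axes.
def _text_example():
    t = Text(x=0.5, y=0.5, text='centered label',
             horizontalalignment='center', verticalalignment='center',
             rotation=45, fontsize=12)
    return t  # e.g. ax.add_artist(t) would attach it to an axes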
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)"%(self._x,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
Get the direction of the dash: 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash relative to the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation(%g,%g,%s)"%(self.xy[0],self.xy[1],repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
drawn. Otherwise, a YAArrow patch instance is created and
drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
and base are shrink percent of the distance *d* away from the
endpoints. ie, ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.Polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, positive values are
measured from the bottom-left corner, while negative values are
measured from the top-right corner, e.g.::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and "arrowstyle" in arrowprops:
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fall back to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted for.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1, oy1).
# It will first be clipped by patchA and patchB, then
# shrunk by shrinkA and shrinkB (in points). If patchA is
# not set, self.bbox_patch is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in (l, r, xc)]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in (b, t, yc)]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
|
gpl-3.0
|
rianrajagede/iris-python
|
Tensorflow/iris_tf_v1.py
|
2
|
3582
|
from __future__ import print_function
from builtins import range
"""
SECTION 1 : Load and setup data for training
the dataset is split into two files from the original dataset:
iris_train.csv = data for training purposes, 80% of the original data
iris_test.csv = data for testing purposes, 20% of the original data
"""
import pandas as pd
#load
datatrain = pd.read_csv('../Datasets/iris/iris_train.csv')
#change string value to numeric
datatrain.loc[datatrain['species']=='Iris-setosa', 'species']=0
datatrain.loc[datatrain['species']=='Iris-versicolor', 'species']=1
datatrain.loc[datatrain['species']=='Iris-virginica', 'species']=2
datatrain = datatrain.apply(pd.to_numeric)
#change dataframe to array
datatrain_array = datatrain.values
#split x and y (feature and target)
xtrain = datatrain_array[:,:4]
ytrain = datatrain_array[:,4]
"""
SECTION 2 : Build and Train Model
Multilayer perceptron model, with one hidden layer.
input layer : 4 neurons, represents the features of Iris
hidden layer : 10 neurons, activation using ReLU
output layer : 3 neurons, represents the classes of Iris, softmax layer
optimizer = gradient descent (the full training set is fed each step, no mini-batches)
loss function = categorical cross entropy
learning rate = 0.0001
epoch = 1000
"""
import os
import tensorflow as tf
# tensorflow configuration
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # minimize TensorFlow logging
cwd = os.path.abspath(os.path.dirname(__file__)) # path for saving model
saver_path = os.path.abspath(os.path.join(cwd, 'models/model_sess.ckpt'))
tf.set_random_seed(1103) # fix the random seed for reproducible results
# tensorflow model
input = tf.placeholder(tf.float32, [None, 4])
label = tf.placeholder(tf.float32, [None])
onehot_label = tf.one_hot(tf.cast(label, tf.int32), 3)
hidden = tf.layers.dense(input, 10, tf.nn.relu, name="hidden")
# kernel_initializer=tf.initializers.random_uniform(minval=-1, maxval=1, seed=123))
output = tf.layers.dense(hidden, 3, tf.nn.relu, name="output")
# kernel_initializer=tf.initializers.random_uniform(minval=-1, maxval=1, seed=123))
soft_output = tf.nn.softmax(output)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# optimization
loss = -tf.reduce_sum(onehot_label * tf.log(soft_output))
optimizer = tf.train.GradientDescentOptimizer(0.0001)
is_correct = tf.equal(tf.argmax(soft_output,1), tf.argmax(onehot_label,1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
train_step = optimizer.minimize(loss)
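# Editor's note, a hedged alternative (not used above, shown for reference only):
# TF 1.x (1.5+) also provides a fused softmax cross-entropy op that is numerically
# more stable than computing log(softmax) by hand. Assuming the same `output`
# logits and `onehot_label` tensors, an equivalent loss would be:
# loss_fused = tf.reduce_sum(
#     tf.nn.softmax_cross_entropy_with_logits_v2(labels=onehot_label, logits=output))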
with tf.Session() as sess:
sess.run(init)
# start training
for e in range(1000):
if(e%100==0):
print(e,"/ 1000 - Loss:", sess.run(loss, feed_dict={input:xtrain, label:ytrain}))
sess.run(train_step, feed_dict={input:xtrain, label:ytrain})
# save model
saver.save(sess, saver_path)
print("Train accuracy",sess.run(accuracy, feed_dict={input:xtrain, label:ytrain}))
"""
SECTION 3 : Testing model
"""
#load
datatest = pd.read_csv('../Datasets/iris/iris_test.csv')
#change string value to numeric
datatest.loc[datatest['species']=='Iris-setosa', 'species']=0
datatest.loc[datatest['species']=='Iris-versicolor', 'species']=1
datatest.loc[datatest['species']=='Iris-virginica', 'species']=2
datatest = datatest.apply(pd.to_numeric)
#change dataframe to array
datatest_array = datatest.values
#split x and y (feature and target)
xtest = datatest_array[:,:4]
ytest = datatest_array[:,4]
# get the model then test
with tf.Session() as sess:
saver.restore(sess, saver_path)
print("Test accuracy",sess.run(accuracy, feed_dict={input:xtest, label:ytest}))
|
mit
|
sserrot/champion_relationships
|
venv/Lib/site-packages/ipykernel/eventloops.py
|
1
|
13124
|
# encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from functools import partial
import os
import sys
import platform
import zmq
from distutils.version import LooseVersion as V
from traitlets.config.application import Application
def _use_appnope():
"""Should we use appnope for dealing with OS X app nap?
Checks if we are on OS X 10.9 or greater.
"""
return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
from IPython.external.qt_for_kernel import QtCore
def process_stream_events():
"""fall back to main loop when there's a socket event"""
# call flush to ensure that the stream doesn't lose events
# due to our consuming of the edge-triggered FD
# flush returns the number of events consumed.
# if there were any, wake it up
if stream.flush(limit=1):
notifier.setEnabled(False)
kernel.app.quit()
fd = stream.getsockopt(zmq.FD)
notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
notifier.activated.connect(process_stream_events)
# there may already be unprocessed events waiting.
# these events will not wake zmq's edge-triggered FD
# since edge-triggered notification only occurs on new i/o activity.
# process all the waiting events immediately
# so we start in a clean state ensuring that any new i/o events will notify.
# schedule first call on the eventloop as soon as it's running,
# so we don't block here processing events
timer = QtCore.QTimer(kernel.app)
timer.setSingleShot(True)
timer.timeout.connect(process_stream_events)
timer.start(0)
# mapping of keys to loop functions
loop_map = {
'inline': None,
'nbagg': None,
'notebook': None,
'ipympl': None,
'widget': None,
None: None,
}
def register_integration(*toolkitnames):
"""Decorator to register an event loop to integrate with the IPython kernel
The decorator takes the names under which to register the event loop for the %gui magic.
You can provide alternative names for the same toolkit.
The decorated function should take a single argument, the IPython kernel
instance, arrange for the event loop to call ``kernel.do_one_iteration()``
at least every ``kernel._poll_interval`` seconds, and start the event loop.
:mod:`ipykernel.eventloops` provides and registers such functions
for a few common event loops.
"""
def decorator(func):
for name in toolkitnames:
loop_map[name] = func
func.exit_hook = lambda kernel: None
def exit_decorator(exit_func):
"""@func.exit is now a decorator
to register a function to be called on exit
"""
func.exit_hook = exit_func
return exit_func
func.exit = exit_decorator
return func
return decorator
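# A minimal sketch of the contract described in the docstring above, for a
# hypothetical toolkit (the 'mytoolkit' name and the simple polling strategy
# are assumptions added for illustration, not part of ipykernel):
#
# @register_integration('mytoolkit')
# def loop_mytoolkit(kernel):
#     """Poll the kernel at least every kernel._poll_interval seconds."""
#     import time
#     while not kernel.shell.exit_now:
#         kernel.do_one_iteration()
#         time.sleep(kernel._poll_interval)
#
# @loop_mytoolkit.exit
# def loop_mytoolkit_exit(kernel):
#     pass  # stop the hypothetical toolkit's event loop here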
def _loop_qt(app):
"""Inner-loop for running the Qt eventloop
Pulled from guisupport.start_event_loop in IPython < 5.2,
since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
rather than if the eventloop is actually running.
"""
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
@register_integration('qt4')
def loop_qt4(kernel):
"""Start a kernel with PyQt4 event loop integration."""
from IPython.lib.guisupport import get_app_qt4
kernel.app = get_app_qt4([" "])
kernel.app.setQuitOnLastWindowClosed(False)
# Only register the eventloop for the shell stream because doing
# it for the control stream is generating a bunch of unnecessary
# warnings on Windows.
_notify_stream_qt(kernel, kernel.shell_streams[0])
_loop_qt(kernel.app)
@register_integration('qt', 'qt5')
def loop_qt5(kernel):
"""Start a kernel with PyQt5 event loop integration."""
os.environ['QT_API'] = 'pyqt5'
return loop_qt4(kernel)
# exit and watch are the same for qt 4 and 5
@loop_qt4.exit
@loop_qt5.exit
def loop_qt_exit(kernel):
kernel.app.exit()
def _loop_wx(app):
"""Inner-loop for running the Wx eventloop
Pulled from guisupport.start_event_loop in IPython < 5.2,
since IPython 5.2 only checks `get_ipython().active_eventloop` is defined,
rather than if the eventloop is actually running.
"""
app._in_event_loop = True
app.MainLoop()
app._in_event_loop = False
@register_integration('wx')
def loop_wx(kernel):
"""Start a kernel with wx event loop support."""
import wx
# Wx uses milliseconds
poll_interval = int(1000 * kernel._poll_interval)
def wake():
"""wake from wx"""
for stream in kernel.shell_streams:
if stream.flush(limit=1):
kernel.app.ExitMainLoop()
return
# We have to put the wx.Timer in a wx.Frame for it to fire properly.
# We make the Frame hidden when we create it in the main app below.
class TimerFrame(wx.Frame):
def __init__(self, func):
wx.Frame.__init__(self, None, -1)
self.timer = wx.Timer(self)
# Units for the timer are in milliseconds
self.timer.Start(poll_interval)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.func = func
def on_timer(self, event):
self.func()
# We need a custom wx.App to create our Frame subclass that has the
# wx.Timer to defer back to the tornado event loop.
class IPWxApp(wx.App):
def OnInit(self):
self.frame = TimerFrame(wake)
self.frame.Show(False)
return True
# The redirect=False here makes sure that wx doesn't replace
# sys.stdout/stderr with its own classes.
if not (
getattr(kernel, 'app', None)
and isinstance(kernel.app, wx.App)
):
kernel.app = IPWxApp(redirect=False)
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix it by setting the
# handler back to the Python default.
import signal
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
_loop_wx(kernel.app)
@loop_wx.exit
def loop_wx_exit(kernel):
import wx
wx.Exit()
@register_integration('tk')
def loop_tk(kernel):
"""Start a kernel with the Tk event loop."""
from tkinter import Tk, READABLE
app = Tk()
# Capability detection:
# per https://docs.python.org/3/library/tkinter.html#file-handlers
# file handlers are not available on Windows
if hasattr(app, 'createfilehandler'):
# A basic wrapper for structural similarity with the Windows version
class BasicAppWrapper(object):
def __init__(self, app):
self.app = app
self.app.withdraw()
def process_stream_events(stream, *a, **kw):
"""fall back to main loop when there's a socket event"""
if stream.flush(limit=1):
app.tk.deletefilehandler(stream.getsockopt(zmq.FD))
app.quit()
# For Tkinter, we create a Tk object and call its withdraw method.
kernel.app_wrapper = BasicAppWrapper(app)
for stream in kernel.shell_streams:
notifier = partial(process_stream_events, stream)
# seems to be needed for tk
notifier.__name__ = "notifier"
app.tk.createfilehandler(stream.getsockopt(zmq.FD), READABLE, notifier)
# schedule initial call after start
app.after(0, notifier)
app.mainloop()
else:
doi = kernel.do_one_iteration
# Tk uses milliseconds
poll_interval = int(1000 * kernel._poll_interval)
class TimedAppWrapper(object):
def __init__(self, app, func):
self.app = app
self.app.withdraw()
self.func = func
def on_timer(self):
self.func()
self.app.after(poll_interval, self.on_timer)
def start(self):
self.on_timer() # Call it once to get things going.
self.app.mainloop()
kernel.app_wrapper = TimedAppWrapper(app, doi)
kernel.app_wrapper.start()
@loop_tk.exit
def loop_tk_exit(kernel):
kernel.app_wrapper.app.destroy()
@register_integration('gtk')
def loop_gtk(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtkembed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
kernel._gtk = gtk_kernel
@loop_gtk.exit
def loop_gtk_exit(kernel):
kernel._gtk.stop()
@register_integration('gtk3')
def loop_gtk3(kernel):
"""Start the kernel, coordinating with the GTK event loop"""
from .gui.gtk3embed import GTKEmbed
gtk_kernel = GTKEmbed(kernel)
gtk_kernel.start()
kernel._gtk = gtk_kernel
@loop_gtk3.exit
def loop_gtk3_exit(kernel):
kernel._gtk.stop()
@register_integration('osx')
def loop_cocoa(kernel):
"""Start the kernel, coordinating with the Cocoa CFRunLoop event loop
via the matplotlib MacOSX backend.
"""
from ._eventloop_macos import mainloop, stop
real_excepthook = sys.excepthook
def handle_int(etype, value, tb):
"""don't let KeyboardInterrupts look like crashes"""
# wake the eventloop when we get a signal
stop()
if etype is KeyboardInterrupt:
print("KeyboardInterrupt caught in CFRunLoop", file=sys.__stdout__)
else:
real_excepthook(etype, value, tb)
while not kernel.shell.exit_now:
try:
# double nested try/except, to properly catch KeyboardInterrupt
# due to pyzmq Issue #130
try:
# don't let interrupts during mainloop invoke crash_handler:
sys.excepthook = handle_int
mainloop(kernel._poll_interval)
for stream in kernel.shell_streams:
if stream.flush(limit=1):
# events to process, return control to kernel
return
except:
raise
except KeyboardInterrupt:
# Ctrl-C shouldn't crash the kernel
print("KeyboardInterrupt caught in kernel", file=sys.__stdout__)
finally:
# ensure excepthook is restored
sys.excepthook = real_excepthook
@loop_cocoa.exit
def loop_cocoa_exit(kernel):
from ._eventloop_macos import stop
stop()
@register_integration('asyncio')
def loop_asyncio(kernel):
'''Start a kernel with asyncio event loop support.'''
import asyncio
loop = asyncio.get_event_loop()
# loop is already running (e.g. tornado 5), nothing left to do
if loop.is_running():
return
if loop.is_closed():
# main loop is closed, create a new one
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop._should_close = False
# pause eventloop when there's an event on a zmq socket
def process_stream_events(stream):
"""fall back to main loop when there's a socket event"""
if stream.flush(limit=1):
loop.stop()
for stream in kernel.shell_streams:
fd = stream.getsockopt(zmq.FD)
notifier = partial(process_stream_events, stream)
loop.add_reader(fd, notifier)
loop.call_soon(notifier)
while True:
error = None
try:
loop.run_forever()
except KeyboardInterrupt:
continue
except Exception as e:
error = e
if loop._should_close:
loop.close()
if error is not None:
raise error
break
@loop_asyncio.exit
def loop_asyncio_exit(kernel):
"""Exit hook for asyncio"""
import asyncio
loop = asyncio.get_event_loop()
@asyncio.coroutine
def close_loop():
if hasattr(loop, 'shutdown_asyncgens'):
yield from loop.shutdown_asyncgens()
loop._should_close = True
loop.stop()
if loop.is_running():
close_loop()
elif not loop.is_closed():
loop.run_until_complete(close_loop())
loop.close()
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
raise ValueError(e)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop
|
mit
|
icdishb/scikit-learn
|
sklearn/covariance/tests/test_covariance.py
|
142
|
11068
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
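# For reference, a minimal sketch (an editor-added illustration, not part of the
# original test suite) of the standard shrinkage formula these tests exercise:
# shrunk = (1 - shrinkage) * emp_cov + shrinkage * mu * I, with mu = trace(emp_cov) / p.
# sklearn's shrunk_covariance is expected to match this up to numerical precision.
def _manual_shrunk_covariance(emp_cov, shrinkage=0.1):
    p = emp_cov.shape[0]
    mu = np.trace(emp_cov) / p
    return (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(p)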
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
|
bsd-3-clause
|