prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90)
---|---|---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyburst.grids import grid_analyser
def compare(batch, source, ref_source, bprops=('rate', 'fluence', 'peak')):
"""Compares models with differe bdats/adapnets"""
kgrid = grid_analyser.Kgrid(source, linregress_burst_rate=False)
kgrid_ref = grid_analyser.Kgrid(ref_source, linregress_burst_rate=False)
sub_params = kgrid.get_params(batch).reset_index()
sub_summ = kgrid.get_summ(batch).reset_index()
params_ref, summ_ref = extract_ref_subset(param_table=sub_params, kgrid_ref=kgrid_ref)
fig, ax = plt.subplots(len(bprops), 1, figsize=(10, 12))
for i, bprop in enumerate(bprops):
u_bprop = f'u_{bprop}'
ratio = sub_summ[bprop] / summ_ref[bprop]
u_frac = sub_summ[u_bprop]/sub_summ[bprop] + summ_ref[u_bprop]/summ_ref[bprop]
u_ratio = ratio * u_frac
n = len(ratio)
ax[i].errorbar(np.arange(n), ratio, yerr=u_ratio, ls='none', marker='o', capsize=3)
ax[i].plot([0, n], [1, 1], color='black')
ax[i].set_ylabel(bprop)
plt.tight_layout()
plt.show(block=False)
def extract_ref_subset(param_table, kgrid_ref):
"""Returns subset of reference grid that matches comparison subset
"""
params_out = pd.DataFrame()
summ_out = pd.DataFrame()
for row in param_table.itertuples():
params = {'z': row.z, 'x': row.x, 'accrate': row.accrate,
'qb': row.qb, 'mass': row.mass}
sub_params = kgrid_ref.get_params(params=params)
sub_summ = kgrid_ref.get_summ(params=params)
if len(sub_params) == 0:
raise RuntimeError(f'No corresponding model for {params}')
if len(sub_params) > 1:
raise RuntimeError(f'Multiple models match {params}')
params_out = pd.concat((params_out, sub_params), ignore_index=True)
summ_out = pd.concat((summ_out, sub_summ), ignore_index=True)
return params_out, summ_out
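A minimal usage sketch for the helpers above, assuming the pyburst grid sources are already configured; the source names 'biggrid2' and 'mesa' and the batch number are placeholders for illustration, not values taken from the snippet:

# Hypothetical invocation; 'biggrid2', 'mesa', and batch=1 are placeholder values.
compare(batch=1, source='biggrid2', ref_source='mesa',
        bprops=('rate', 'fluence', 'peak'))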
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (may allow in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days')
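The core behaviors exercised in test_add_dti_dti and test_sub_dti_dti above can be reproduced with a short standalone snippet; this is an illustrative sketch of the semantics the tests assert (adding two DatetimeIndexes raises, subtracting them yields a TimedeltaIndex), not part of the original test file:

import pandas as pd

dti = pd.date_range('2013-01-01', periods=3)
print(dti - dti)        # TimedeltaIndex(['0 days', '0 days', '0 days'], ...)
try:
    dti + dti           # adding two DatetimeIndexes is rejected
except TypeError as err:
    print('TypeError:', err)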
#!/usr/bin/env python3
# This script is included in documentation. Adapt line numbers if touched.
import glob
import pandas, seaborn
results = glob.glob("result_mpi_*.csv")
df_list = []
for result in results:
df_list.append(pandas.read_csv(result))
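The plotting script is cut off after the read loop; a plausible continuation is sketched below purely for illustration. The column names 'ranks' and 'seconds' are assumptions, not taken from the source files:

# Hypothetical continuation: combine the per-run frames and plot them.
# Column names 'ranks' and 'seconds' are assumed, not from the source.
df = pandas.concat(df_list, ignore_index=True)
ax = seaborn.lineplot(data=df, x="ranks", y="seconds")
ax.figure.savefig("scaling.png")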
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals( | pandas.Series([True, True, False]) | pandas.Series |
import joblib
import pandas as pd
import numpy as np
class RandomForestClassifier:
def __init__(self):
path_to_artifacts = "../../research/"
self.value_fill_missing = joblib.load(path_to_artifacts + "pi_train_mode.joblib")
self.model = joblib.load(path_to_artifacts + "pi_random_forest.joblib")
def preprocessing(self, input_data):
print("Preprocessing...")
input_data = | pd.DataFrame(input_data, index=[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Updating EDDI on a monthly basis.
Run this with crontab once a month to pull netcdf files from
NOAA's PSD FTP server, transform them to fit in the app, and either
append them to an existing file, or build the data set from scratch. This
also rebuilds each percentile netcdf entirely because those are rank based.
For more information check Get_WWDT.py
Created on Fri Feb 10 14:33:38 2019
@author: User
"""
import calendar
import datetime as dt
import ftplib
from glob import glob
from netCDF4 import Dataset
import numpy as np
import os
from osgeo import gdal
import pandas as pd
import pathlib
import sys
from tqdm import tqdm
import xarray as xr
# Refactor all of this
pwd = str(pathlib.Path(__file__).parent.absolute())
data_path = os.path.join(pwd, "..")
sys.path.insert(0, data_path)
from functions import isInt, toNetCDF, toNetCDFAlbers, toNetCDFPercentile
# gdal.PushErrorHandler('CPLQuietErrorHandler')
os.environ['GDAL_PAM_ENABLED'] = 'NO'
# There are often missing epsg codes in the gcs.csv file, but proj4 works
proj = ('+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 ' +
'+ellps=GRS80 +datum=NAD83 +units=m no_defs')
# Get resolution from file call
try:
res = float(sys.argv[1])
except:
res = 0.25
# In[] Data source and target directory
ftp_path = 'ftp://ftp.cdc.noaa.gov/Projects/EDDI/CONUS_archive/data'
temp_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/eddi')
pc_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/percentiles')
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
if not os.path.exists(pc_folder):
os.makedirs(pc_folder)
# In[] Index options
indices = ['eddi1', 'eddi2', 'eddi3', 'eddi4', 'eddi5', 'eddi6', 'eddi7',
'eddi8', 'eddi9', 'eddi10', 'eddi11', 'eddi12']
# In[] Define scraping routine
def getEDDI(scale, date, temp_folder, write=False):
'''
These come out daily, but each represents the accumulated conditions of the
prior 30 days. Since we want one value per month we are only downloading
the last day of the month. I'm not sure whether it will be possible to
append this directly to an existing netcdf or if we need to write to a file
first.
'''
year = date.year
month = date.month
last_day = calendar.monthrange(year, month)[1]
if not write:
memory_file = []
def appendline(line):
memory_file.append(line)
try:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day)
ftp.retrlines('RETR ' + file_name, appendline)
except:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day-1)
ftp.retrlines('RETR ' + file_name, appendline)
return memory_file
else:
def writeline(line):
local_file.write(line + "\n")
local_file = open(os.path.join(temp_folder, 'eddi.asc'), 'w')
try:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day)
ftp.retrlines('RETR ' + file_name, writeline)
except:
file_name = 'EDDI_ETrs_{:02d}mn_{}{:02d}{}.asc'.format(scale, year,
month, last_day - 1)
ftp.retrlines('RETR ' + file_name, writeline)
local_file.close()
return os.path.join(temp_folder, 'eddi.asc')
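# Hedged usage sketch (not part of the original script): getEDDI() relies on
# the module-level `ftp` connection opened below, already cwd'd into the EDDI
# archive. For example, the 1-month EDDI grid for March 2019 could be pulled
# into a local .asc file with:
#   asc_path = getEDDI(scale=1, date=dt.datetime(2019, 3, 1),
#                      temp_folder=temp_folder, write=True)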
# In[] Today's date, month, and year
todays_date = dt.datetime.today()
today = np.datetime64(todays_date)
print("##")
print("#####")
print("############")
print("#######################")
print("#######################################")
print("####################################################")
print("\nRunning Get_EDDI.py using a " + str(res) + " degree resolution:\n")
print(str(today) + '\n')
# In[] Get time series of currently available values
# Connect to FTP
ftp = ftplib.FTP('ftp.cdc.noaa.gov', 'anonymous', '<EMAIL>')
for index in indices:
ftp.cwd('/Projects/EDDI/CONUS_archive/data/')
print('\n' + index)
original_path = os.path.join(data_path, "data/droughtindices/netcdfs/",
index + '.nc')
albers_path = os.path.join(data_path, "data/droughtindices/netcdfs/albers",
index + '.nc')
percentile_path = os.path.join(data_path,
"data/droughtindices/netcdfs/percentiles",
index + '.nc')
scale = index[-2:]
scale = int("".join([s for s in scale if isInt(s)]))
# Delete existing contents of temporary folder
temps = glob(os.path.join(temp_folder, "*"))
for t in temps:
os.remove(t)
####### If we are only missing some dates #################################
if os.path.exists(original_path):
with xr.open_dataset(original_path) as data:
dates = | pd.DatetimeIndex(data.time.data) | pandas.DatetimeIndex |
import os
import sys
import pandas as pd
import numpy as np
import random
from utils import *
np.random.seed(9527)
random.seed(9527)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--max_movie_num', type=int, default=1000)
parser.add_argument('--max_user_num', type=int, default=1000)
parser.add_argument('--max_user_like_tag', type=int, default=20)
parser.add_argument('--min_user_like_tag', type=int, default=10)
parser.add_argument('--max_tag_per_movie', type=int, default=8)
parser.add_argument('--min_tag_per_movie', type=int, default=8)
parser.add_argument('--rater', type=str, default='qualitybase')
parser.add_argument('--recsys', type=str, default='Pop')
parser.add_argument('--rcttag_user_num', type=int, default=100)
parser.add_argument('--rcttag_movie_num', type=int, default=10)
parser.add_argument('--missing_rate_rating', type=float, default=0.02)
parser.add_argument('--missing_type_rating', type=str, default='default')
parser.add_argument('--missing_rate_obstag', type=float, default=0.007)
parser.add_argument('--missing_type_obstag', type=str, default='default')
parser.add_argument('--quality_sigma', type=float, default=0.75)
parser.add_argument('--test_identifiable_num', type=int, default=5000)
parser.add_argument('--test_identifiable_num_positive',
type=int,
default=1500)
parser.add_argument('--test_inidentifiable_num', type=int, default=4000)
parser.add_argument('--test_inidentifiable_positive',
type=int,
default=1200)
parser.add_argument('--obstag_non_missing_rate', type=float, default=0.6)
parser.add_argument('--need_trainset', type=int, default=0)
parser.add_argument('--need_testset', type=int, default=0)
parser.add_argument('--rerank_id', type=int, default=1)
args = parser.parse_args()
paras = vars(args)
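    # Hedged example invocation (the script filename is hypothetical); every
    # flag shown is defined by the argparse options above:
    #   python generate_data.py --max_user_num 1000 --max_movie_num 1000 \
    #       --need_trainset 1 --need_testset 1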
data_dir = './'
if not os.path.exists(data_dir + 'generate_data/'):
os.makedirs(data_dir + 'generate_data/')
for i in ['train', 'test']:
if not os.path.exists(data_dir + 'final_data/before_rerank_id/' + i):
os.makedirs(data_dir + 'final_data/before_rerank_id/' + i)
if not os.path.exists(data_dir + 'final_data/rerank_id/' + i):
os.makedirs(data_dir + 'final_data/rerank_id/' + i)
big_movie_tag_ct = pd.read_csv(data_dir + 'original_data/movie_tag_ct.csv')
base_movie_rating = pd.read_csv(data_dir +
'original_data/movie_rating.csv',
index_col='movieid')
print('======generating base data======')
# generate user_id data
if os.path.exists(data_dir + 'generate_data/user_id.csv'):
user_id = np.array(
pd.read_csv(data_dir + 'generate_data/user_id.csv',
index_col='userid').index)
max_user_num = len(user_id)
else:
max_user_num = paras['max_user_num']
user_id = np.array(range(max_user_num))
pd.DataFrame(data=user_id,
columns=['userid']).set_index('userid').to_csv(
data_dir + 'generate_data/user_id.csv', header=True)
# generate movie_id data
mv_tag_count: pd.DataFrame = big_movie_tag_ct[[
'movieid', 'tagCount'
]].groupby('movieid')['tagCount'].sum().sort_values(
ascending=False) # number of times each movie has been tagged
if os.path.exists(data_dir + 'generate_data/movie_id.csv'):
movie_data = pd.read_csv(data_dir + 'generate_data/movie_id.csv',
index_col='movieid')
movie_id = np.array(movie_data.index)
max_movie_num = len(movie_id)
else:
max_movie_num = min(len(mv_tag_count), paras['max_movie_num'])
movie_id = np.array(mv_tag_count.head(max_movie_num).index)
movie_data = pd.DataFrame(data=movie_id,
columns=['movieid']).set_index('movieid')
movie_data.to_csv(data_dir + 'generate_data/movie_id.csv', header=True)
obstag_count: pd.DataFrame = big_movie_tag_ct[
big_movie_tag_ct['movieid'].isin(movie_id)].groupby(
'tagid')['tagCount'].sum().sort_values(
ascending=False).to_frame() # total count of each tag across the selected movies
rct_distribution = obstag_count['tagCount'] / \
obstag_count['tagCount'].sum() # tag popularity distribution
obstag_count.to_csv(data_dir + 'generate_data/obstag_count.csv',
header=True)
# generate movie_real_tag_list data
if os.path.exists(data_dir + 'generate_data/movie_real_tag_list.csv'):
movie_real_tag_list = pd.read_csv(
data_dir + 'generate_data/movie_real_tag_list.csv',
index_col='movieid')
movie_real_tag_list['taglist'] = movie_real_tag_list['taglist'].apply(
eval)
# print(movie_real_tag_list.head())
else:
movie_real_tag_list = pd.DataFrame()
for mid in movie_id:
mv_tag = big_movie_tag_ct[big_movie_tag_ct['movieid'] ==
mid]['tagid'].to_list()
mv_tag = mv_tag[:paras['max_tag_per_movie']] # if a movie has too many tags, drop the extras
while len(mv_tag) < paras['min_tag_per_movie']: # if a movie has too few tags, fill in more tags
newtag = random_tag(obstag_count, 1, rct_distribution)[0]
if newtag not in mv_tag:
mv_tag.append(newtag)
movie_real_tag_list = movie_real_tag_list.append(
{
'movieid': mid,
'taglist': mv_tag
}, ignore_index=True)
movie_real_tag_list['movieid'] = movie_real_tag_list['movieid'].astype(
'int64')
movie_real_tag_list = movie_real_tag_list.set_index('movieid')
movie_real_tag_list.to_csv(data_dir +
'generate_data/movie_real_tag_list.csv',
header=True)
# generate user_like_tag_list data
if os.path.exists(data_dir + 'generate_data/user_like_tag_list.csv'):
user_like_tag_list = pd.read_csv(
data_dir + 'generate_data/user_like_tag_list.csv',
index_col='userid')
user_like_tag_list['user_like_tag'] = user_like_tag_list[
'user_like_tag'].apply(eval)
# print(user_like_tag_list.head())
else:
max_user_like_tag = paras['max_user_like_tag']
min_user_like_tag = paras['min_user_like_tag']
user_tag_count = np.random.randint(
low=min_user_like_tag,
high=max_user_like_tag + 1,
size=max_user_num) # number of liked tags per user
user_like_tag_list: pd.DataFrame = generate_user_like_tag(
user_id, user_tag_count, obstag_count,
rct_distribution) # generate the tags each user likes
user_like_tag_list.to_csv(data_dir +
'generate_data/user_like_tag_list.csv',
header=True)
# generate Real_RCTtag
if os.path.exists(data_dir + 'generate_data/real_rcttag.csv'):
real_rcttag = pd.read_csv(data_dir + 'generate_data/real_rcttag.csv',
index_col='userid')
real_rcttag.columns = map(eval, real_rcttag.columns)
real_rcttag = real_rcttag.applymap(eval)
# print(real_rcttag.head())
else:
real_rcttag: pd.DataFrame = pd.DataFrame(index=user_id,
columns=movie_id)
real_rcttag.index.name = 'userid'
for uid, mid in [(x, y) for x in user_id for y in movie_id]:
mv_tag = movie_real_tag_list.loc[mid, 'taglist']
user_tag = user_like_tag_list.loc[uid, 'user_like_tag']
real_rcttag.loc[uid, mid] = list(
set(mv_tag).intersection(set(user_tag)))
real_rcttag.to_csv(data_dir + 'generate_data/real_rcttag.csv',
header=True)
# generate Quality data
if os.path.exists(data_dir + 'generate_data/quality.csv'):
quality = pd.read_csv(data_dir + 'generate_data/quality.csv',
index_col='movieid')
# print(quality.head())
else:
quality_sigma = paras['quality_sigma']
quality: pd.DataFrame = pd.DataFrame(index=movie_id,
columns=['quality'])
quality.index.name = 'movieid'
quality['quality'] = base_movie_rating.loc[movie_id] + \
np.random.normal(loc=0, scale=quality_sigma,
size=len(movie_id)).reshape(-1, 1)
quality.to_csv(data_dir + 'generate_data/quality.csv', header=True)
# generate Rating data
if os.path.exists(data_dir + 'generate_data/rating.csv'):
rating = pd.read_csv(data_dir + 'generate_data/rating.csv',
index_col='userid')
rating.columns = map(eval, rating.columns)
# print(rating.head())
else:
rating: pd.DataFrame = pd.DataFrame(index=user_id, columns=movie_id)
rating.index.name = 'userid'
rater = paras['rater']
rating = get_rating(rating,
user_id=user_id,
movie_id=movie_id,
user_like_tag_list=user_like_tag_list,
movie_real_tag_list=movie_real_tag_list,
max_user_num=max_user_num,
max_movie_num=max_movie_num,
rater=rater,
quality=quality)
rating.to_csv(data_dir + 'generate_data/rating.csv', header=True)
# generate Recsys
if os.path.exists(data_dir + 'generate_data/recsys.csv'):
recsys = pd.read_csv(data_dir + 'generate_data/recsys.csv',
index_col='userid')
recsys.columns = map(eval, recsys.columns)
# print(recsys.head())
else:
recsys: pd.DataFrame = pd.DataFrame(index=user_id, columns=movie_id)
recsys.index.name = 'userid'
recsys = get_recsys_score(recsys,
max_movie_num=max_movie_num,
mv_tag_count=mv_tag_count)
recsys.to_csv(data_dir + 'generate_data/recsys.csv', header=True)
# generate R_RCTTag
if os.path.exists(data_dir + 'generate_data/r_rcttag.csv'):
r_rcttag = pd.read_csv(data_dir + 'generate_data/r_rcttag.csv',
index_col='userid')
r_rcttag.columns = map(eval, r_rcttag.columns)
else:
rcttag_user_num = paras['rcttag_user_num']
rcttag_movie_num = paras['rcttag_movie_num']
r_rcttag: pd.DataFrame = pd.DataFrame(data=0,
index=user_id,
columns=movie_id)
r_rcttag.index.name = 'userid'
u_list = np.random.choice(user_id, size=rcttag_user_num, replace=False)
for uid in u_list:
m_list = np.random.choice(movie_id,
size=rcttag_movie_num,
replace=False)
r_rcttag.loc[uid, m_list] = 1
r_rcttag.to_csv(data_dir + 'generate_data/r_rcttag.csv', header=True)
# generate R-Rating
if os.path.exists(data_dir + 'generate_data/r_rating.csv'):
r_rating = pd.read_csv(data_dir + 'generate_data/r_rating.csv',
index_col='userid')
r_rating.columns = map(eval, r_rating.columns)
else:
missing_rate = paras['missing_rate_rating']
missing_type = paras['missing_type_rating']
r_rating: pd.DataFrame = pd.DataFrame(data=0,
index=user_id,
columns=movie_id)
r_rating.index.name = 'userid'
r_rating = get_r_rating(r_rating,
missing_rate=missing_rate,
missing_type=missing_type,
recsys=recsys)
r_rating.to_csv(data_dir + 'generate_data/r_rating.csv', header=True)
# generate R-Obstag
if os.path.exists(data_dir + 'generate_data/r_obstag.csv'):
r_obstag = pd.read_csv(data_dir + 'generate_data/r_obstag.csv',
index_col='userid')
r_obstag.columns = map(eval, r_obstag.columns)
else:
missing_rate = paras['missing_rate_obstag']
missing_type = paras['missing_type_obstag']
r_obstag: pd.DataFrame = pd.DataFrame(data=0,
index=user_id,
columns=movie_id)
r_obstag.index.name = 'userid'
r_obstag = get_r_obstag(r_obstag,
missing_rate=missing_rate,
missing_type=missing_type,
recsys=recsys,
rating=rating)
r_obstag.to_csv(data_dir + 'generate_data/r_obstag.csv', header=True)
# generate train data
print('======generating train data======')
need_trainset = paras['need_trainset']
if need_trainset == 1:
# output movie data
movie_data['taglist'] = movie_real_tag_list['taglist'].map(
lambda xx: ','.join([str(x) for x in xx]))
movie_data[['taglist']].to_csv(
data_dir + 'final_data/before_rerank_id/train/movie.csv',
header=True,
index=True)
# movie_data = pd.read_csv(data_dir + 'final_data/train/movie.csv')
# print(movie_data.head())
# output rating, rcttag and obstag
rating_out = pd.DataFrame(columns=['userid', 'movieid', 'rating'])
rcttag_out = pd.DataFrame(columns=['userid', 'movieid', 'tagid'])
obstag_out = pd.DataFrame(columns=['userid', 'movieid', 'tagid'])
obstag_missing = pd.DataFrame(columns=['userid', 'movieid', 'tagid'])
obstag_nonmissing_rate = paras['obstag_non_missing_rate']
missingcount = 0
nonmissingcount = 0
for uid, mid in [(x, y) for x in user_id for y in movie_id]:
if r_rating.loc[uid, mid] == 1:
rating_out = rating_out.append(
{
'userid': uid,
'movieid': mid,
'rating': rating.loc[uid, mid]
},
ignore_index=True)
tmp_tag = real_rcttag.loc[uid, mid]
if r_rcttag.loc[uid, mid] == 1:
if len(tmp_tag) == 0:
rcttag_out = rcttag_out.append(
{
'userid': uid,
'movieid': mid,
'tagid': -1
},
ignore_index=True)
for tag in tmp_tag:
rcttag_out = rcttag_out.append(
{
'userid': uid,
'movieid': mid,
'tagid': tag
},
ignore_index=True)
if r_obstag.loc[uid, mid] == 1:
if len(tmp_tag) == 0:
obstag_out = obstag_out.append(
{
'userid': uid,
'movieid': mid,
'tagid': -1
},
ignore_index=True)
for i, tag in enumerate(tmp_tag):
if i == 0 or (i > 0 and
np.random.random() < obstag_nonmissing_rate):
if i > 0:
missingcount += 1
obstag_out = obstag_out.append(
{
'userid': uid,
'movieid': mid,
'tagid': tag
},
ignore_index=True)
elif i > 0:
obstag_missing = obstag_missing.append(
{
'userid': uid,
'movieid': mid,
'tagid': tag
},
ignore_index=True)
nonmissingcount += 1
rating_out['userid'] = rating_out['userid'].astype('int64')
rating_out['movieid'] = rating_out['movieid'].astype('int64')
rating_out['rating'] = rating_out['rating'].astype('int64')
rating_out.to_csv(data_dir +
'final_data/before_rerank_id/train/rating.csv',
header=True,
index=False)
rcttag_out['userid'] = rcttag_out['userid'].astype('int64')
rcttag_out['movieid'] = rcttag_out['movieid'].astype('int64')
rcttag_out.to_csv(data_dir +
'final_data/before_rerank_id/train/rcttag.csv',
header=True,
index=False)
obstag_out['userid'] = obstag_out['userid'].astype('int64')
obstag_out['movieid'] = obstag_out['movieid'].astype('int64')
obstag_out.to_csv(data_dir +
'final_data/before_rerank_id/train/obstag.csv',
header=True,
index=False)
obstag_missing['movieid'] = obstag_missing['movieid'].astype('int64')
obstag_missing.to_csv(data_dir + 'generate_data/obstag_missing.csv',
header=True,
index=False)
print("non mising rate",
missingcount / (missingcount + nonmissingcount))
print("non missing count", missingcount, "missingcount",
nonmissingcount)
print('======generating test set======')
# generate test set
need_testset = paras['need_testset']
if need_testset == 1:
test_identifiable_num = paras['test_identifiable_num']
test_identifiable_num_positive = paras[
'test_identifiable_num_positive']
test_inidentifiable_num = paras['test_inidentifiable_num']
test_inidentifiable_positive = paras['test_inidentifiable_positive']
test_set: pd.DataFrame = pd.DataFrame(
columns=['userid', 'tagid', 'islike'])
if not os.path.exists(data_dir + 'generate_data/extract.csv'):
os.system('python extract_data.py')
extract_pd: pd.DataFrame = pd.read_csv(data_dir +
'generate_data/extract.csv')
extract_dict = dict(
zip(zip(extract_pd['userid'], extract_pd['tagid']),
extract_pd['islike']))
# generating obstag missing data
obstag_missing: pd.DataFrame = pd.read_csv(
data_dir + 'generate_data/obstag_missing.csv')[['userid', 'tagid']]
obstag_missing['islike'] = 1
obstag_missing_dict = dict(
zip(zip(obstag_missing['userid'], obstag_missing['tagid']),
obstag_missing['islike']))
test_1 = obstag_missing
test_set = test_set.append(test_1, ignore_index=True)
# generating identifiable data
positive_count = 0
negtive_count = 0
rating_out = pd.read_csv(
data_dir + 'final_data/before_rerank_id/train/rating.csv')
tmp_pos = pd.DataFrame(columns=['userid', 'tagid', 'islike'])
tmp_neg = pd.DataFrame(columns=['userid', 'tagid', 'islike'])
for i in range(test_identifiable_num):
find_tag = False
while not find_tag:
tmprating = rating_out.sample(n=1, axis=0)
uid = int(tmprating['userid'])
mid = int(tmprating['movieid'])
for tmptag in movie_real_tag_list.loc[mid]['taglist']:
if not ((uid, tmptag) in obstag_missing_dict or
(uid, tmptag) in extract_dict):
if tmptag in user_like_tag_list.loc[uid][
'user_like_tag'] and positive_count < test_identifiable_num_positive:
tmp_pos = tmp_pos.append(
{
'userid': uid,
'tagid': tmptag,
'islike': 1
},
ignore_index=True)
find_tag = True
positive_count += 1
break
elif tmptag not in user_like_tag_list.loc[uid][
'user_like_tag'] and negtive_count < test_identifiable_num - test_identifiable_num_positive:
tmp_neg = tmp_neg.append(
{
'userid': uid,
'tagid': tmptag,
'islike': 0
},
ignore_index=True)
find_tag = True
negtive_count += 1
break
test_2_pos = tmp_pos
test_2 = tmp_neg
test_2 = test_2.append(test_2_pos, ignore_index=True)
test_set = test_set.append(test_2, ignore_index=True)
test_2_dict = dict(
zip(zip(test_2['userid'], test_2['tagid']), test_2['islike']))
# generating inidentifiable data
positive_count = 0
negtive_count = 0
rating_ut_dict = {}
tmp_pos = | pd.DataFrame(columns=['userid', 'tagid', 'islike']) | pandas.DataFrame |
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
#
# SCRIPT : merge_data_for_classifier.py
# PURPOSE : TODO: Update
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
#
# V1.0 : XX/XX/XXXX [<NAME>]
#
# TODO: VERIFY THE OUTPUTS OF THIS SCRIPT!
#
#
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import os
import argparse
import random
import string
from glob import glob
from natsort import natsorted
import numpy as np
from skimage.io import imread, imsave
from skimage.color import grey2rgb
import pandas as pd
def random_string(length=16):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
if __name__ == "__main__":
print("\nExtracting data for the classifier, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i",
nargs="*",
action="store",
dest="input",
required=True,
help="Input processed folders.",)
parser.add_argument("--crop-size",
nargs=1,
action="store",
dest="crop_size",
default=[None],
required=False,
help="Output size.",)
parser.add_argument("--target-labels",
nargs="*",
action="store",
dest="target_labels",
default=[4, 5],
required=False,
help="Target labels to consider as wave breaking.",)
parser.add_argument("--output", "-o",
nargs=1,
action="store",
dest="output",
default=["classifier/"],
required=False,
help="Output path.",)
args = parser.parse_args()
# input paths
paths = args.input
# output path
out = args.output[0]
os.makedirs(out, exist_ok=True)
print("\nProcessing labels:\n")
dfs = []
for i, path in enumerate(paths):
if os.path.isdir(path):
print("Processing path {}".format(path))
# read csv file
xls = glob(path+"/*.xlsx")
if xls:
print(" + labels found")
df = pd.read_excel(xls[0])
dfs.append(df)
df = | pd.concat(dfs) | pandas.concat |
# -*- coding: utf-8 -*-
"""De-Stress Chatbot.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nJOL3jGeZyfNRaxrWqK26mLz4VQd7xZo
# Functions
"""
def is_question(input_string):
    # Returns True if the input contains a question mark anywhere; the previous
    # version reset the flag on every non-'?' character, so it only detected a
    # trailing '?' and raised NameError on an empty string.
    output = False
    for i in input_string:
        if i == '?':
            output = True
            break
    return output
def remove_punctuation(input_string):
out_string = ""
for i in input_string:
if i not in string.punctuation:
out_string += i
return out_string
def prepare_text(input_string):
temp_string = input_string.lower()
temp_string = remove_punctuation(temp_string)
out_list = temp_string.split()
return out_list
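# Quick sanity check of the pipeline above:
#   prepare_text("Hello, how are you?")  # -> ['hello', 'how', 'are', 'you']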
def respond_echo(input_string, number_of_echoes,spacer):
if input_string != None:
echo_output = (input_string + spacer) * number_of_echoes
else:
echo_output = None
return echo_output
def selector(input_list, check_list, return_list):
output = None
for i in input_list:
if i in check_list:
output = random.choice(return_list)
break
return output
def string_concatenator(string1, string2, separator):
output = string1 + separator + string2
return output
def list_to_string(input_list, separator):
output = input_list[0]
for i in input_list[1:]:
output = string_concatenator(output, i, separator)
return output
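# Example: list_to_string(['de', 'stress', 'bot'], '-')  # -> 'de-stress-bot'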
def end_chat(input_list):
if 'quit' in input_list:
output = True
else:
output = False
return output
def is_in_list(list_one, list_two):
"""Check if any element of list_one is in list_two."""
for element in list_one:
if element in list_two:
return True
return False
def find_in_list(list_one, list_two):
"""Find and return an element from list_one that is in list_two, or None otherwise."""
for element in list_one:
if element in list_two:
return element
return None
def is_points(input_string):
p = 0
h = []
for i in input_string:
o = i.count('.')
if o == 1:
p += 1
if p == 5:
break
h.append(i)
h.append('.')
return h
"""# Librerias"""
#!pip install covid
from covid import Covid
import string
import random
import nltk
import pandas as pd
import numpy as np
import textwrap
import cv2
"""# Información"""
#!git clone https://github.com/ChatBotChallengeCdCMX/ChatBotForCovidDe-stress.git
Hombres = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/nombreshombres .csv')
Mujeres = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/nombresmujeres.csv')
Hombres = list(Hombres.iloc[:,0])
Mujeres = list(Mujeres.iloc[:,0])
Nombres = Hombres + Mujeres
Musica = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/Music.csv')
Music = | pd.DataFrame(Musica) | pandas.DataFrame |
#coding=utf8
import sys
import json
import pandas as pd
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: dureader_to_msmarco.py <inputfile> <outputfile>")
exit()
else:
df = | pd.DataFrame() | pandas.DataFrame |
"""Test solvent-accessible surface area methods."""
import logging
import json
from pathlib import Path
import pytest
import yaml
from scipy.stats import linregress
import numpy as np
import pandas as pd
from osmolytes.sasa import SolventAccessibleSurface, ReferenceModels
from osmolytes.pqr import parse_pqr_file, Atom, aggregate, count_residues
_LOGGER = logging.getLogger(__name__)
with open("tests/data/alkanes/alkanes.json", "rt") as json_file:
ATOM_AREAS = json.load(json_file)
PROTEIN_PATH = Path("tests/data/proteins")
@pytest.mark.parametrize("radius", [0.25, 0.5, 1.0, 2.0, 4.0])
def test_one_sphere_sasa(radius, tmp_path):
"""Test solvent-accessible surface areas for one sphere."""
atom = Atom()
atom.position = np.random.randn(3)
frac = np.random.rand(1)[0]
atom.radius = frac * radius
probe_radius = (1.0 - frac) * radius
xyz_path = Path(tmp_path) / "sphere.xyz"
sas = SolventAccessibleSurface(
[atom], probe_radius, 200, xyz_path=xyz_path
)
atom_sasa = sas.atom_surface_area(0)
ref_sasa = 4.0 * np.pi * radius * radius
_LOGGER.info(
f"Radius: {radius}, Test area: {atom_sasa}, Ref area: {ref_sasa}"
)
np.testing.assert_almost_equal(atom_sasa, ref_sasa)
def two_sphere_area(radius1, radius2, distance):
"""Area of two overlapping spheres.
:param float radius1: radius of sphere1
:param float radius2: radius of sphere2
:param float distance: distance between centers of spheres
:returns: exposed areas of spheres
:rtype: (float, float)
"""
distsq = distance * distance
rad1sq = radius1 * radius1
rad2sq = radius2 * radius2
full_area1 = 4 * np.pi * rad1sq
full_area2 = 4 * np.pi * rad2sq
if distance > (radius1 + radius2):
return (full_area1, full_area2)
elif distance <= np.absolute(radius1 - radius2):
if full_area1 > full_area2:
return (full_area1, 0)
if full_area1 < full_area2:
return (0, full_area2)
else:
return (0.5 * full_area1, 0.5 * full_area2)
else:
if radius1 > 0:
cos_theta1 = (rad1sq + distsq - rad2sq) / (2 * radius1 * distance)
cap1_area = 2 * np.pi * radius1 * radius1 * (1 - cos_theta1)
else:
cap1_area = 0
if radius2 > 0:
cos_theta2 = (rad2sq + distsq - rad1sq) / (2 * radius2 * distance)
cap2_area = 2 * np.pi * radius2 * radius2 * (1 - cos_theta2)
else:
cap2_area = 0
return (full_area1 - cap1_area, full_area2 - cap2_area)
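# Worked example for the partial-overlap branch above (illustrative only): two
# unit spheres with centers 1.0 apart give cos_theta = (1 + 1 - 1) / (2 * 1 * 1)
# = 0.5, so each spherical cap has area 2*pi*(1 - 0.5) = pi and each sphere
# keeps 4*pi - pi = 3*pi of exposed area:
#   two_sphere_area(1.0, 1.0, 1.0)  # -> (~9.42, ~9.42)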
@pytest.mark.parametrize("radius", [0.0, 1.1, 2.2, 4.4, 6.6, 8.8])
def test_two_sphere_sasa(radius, tmp_path):
"""Test solvent accessible surface areas for two spheres."""
atom_tolerance = 0.02
total_tolerance = 0.02
probe_radius = 0.0
big_atom = Atom()
big_atom.radius = radius
big_atom.position = np.array([0, 0, 0])
little_atom = Atom()
little_atom.radius = 1.0
test_atom_areas = []
test_total_areas = []
ref_atom_areas = []
ref_total_areas = []
distances = np.linspace(0, (big_atom.radius + little_atom.radius), num=20)
for distance in distances:
_LOGGER.debug("Distance = %g", distance)
little_atom.position = np.array(3 * [distance / np.sqrt(3)])
xyz_path = Path(tmp_path) / f"spheres-{distance}.xyz"
sas = SolventAccessibleSurface(
[big_atom, little_atom], probe_radius, 300, xyz_path=xyz_path
)
test = np.array([sas.atom_surface_area(0), sas.atom_surface_area(1)])
test_total_areas.append(test.sum())
test_atom_areas.append(test)
ref = np.array(
two_sphere_area(big_atom.radius, little_atom.radius, distance)
)
ref_total_areas.append(ref.sum())
ref_atom_areas.append(ref)
test_atom_areas = np.array(test_atom_areas)
test_total_areas = np.array(test_total_areas)
ref_atom_areas = np.array(ref_atom_areas)
ref_total_areas = np.array(ref_total_areas)
rel_difference = np.absolute(
np.divide(test_atom_areas - ref_atom_areas, np.sum(ref_atom_areas))
)
errors = []
if np.any(rel_difference > atom_tolerance):
ref_series = | pd.Series(index=distances, data=ref_total_areas) | pandas.Series |
"""
Created on Mon May 30 2020
@author: evadatinez
"""
from MyAIGuide.data.complaintsData import complaintsData
import numpy as np
from pathlib import Path
import pandas as pd
fname = 'data/raw/ParticipantData/Participant8Anonymized'
# create empty (full of 0s) test dataframe
i = pd.date_range('2015-11-19', periods=1550, freq='1D')
sLength = len(i)
empty = pd.Series(np.zeros(sLength)).values
d = {
'complaintsAwesomeDay': empty,
'complaintsLoneliness': empty,
'complaintsPoorSleep': empty,
'complaintsSadness': empty,
'complaintsStress': empty,
'complaintsTired': empty,
'complaintsWorriedAnxious': empty,
'anotherNonRelevantColumn': empty
}
test_data = pd.DataFrame(data=d, index=i)
# update it with complaints data
test_data = complaintsData(fname=fname, data=test_data)
# read csv directly to compare results
path = Path(fname + '/Participant8Observations.csv')
# read csv from path
df_csv = | pd.read_csv(path) | pandas.read_csv |
"""
References
- https://github.com/codertimo/BERT-pytorch
- http://freesearch.pe.kr/archives/4963
Sample data
- Yelp reviews Polarity from below page
https://course.fast.ai/datasets
References
https://medium.com/swlh/a-simple-guide-on-using-bert-for-text-classification-bbf041ac8d04
- Naver movie review
https://github.com/e9t/nsmc/
Steps
0. follow all steps of __main__.py
1. dataset > vocab.py > build()
"""
import tqdm
import pandas as pd
from torch.utils.data.dataloader import DataLoader
import argparse
from bert_codertimo_pytorch.dataset.vocab import WordVocab
from bert_codertimo_pytorch.dataset import BERTDataset
from bert_codertimo_pytorch.model import BERT
from bert_codertimo_pytorch.trainer import BERTTrainer
"""
Data pre-processing
"""
data_base_path = 'dataset/yelp_review_polarity_csv/'
test_data_file = data_base_path + 'test.csv' # 38000
train_data_file = data_base_path + 'train.csv' # 560000
test_bt_file = test_data_file.replace('.csv', '_bt.csv')
train_bt_file = train_data_file.replace('.csv', '_bt.csv')
def save_df_to_bt_format(input_file_path, output_file_path, top=None):
"""
>>> save_df_to_bt_format(test_data_file, test_bt_file, top=100)
>>> save_df_to_bt_format(train_data_file, train_bt_file, top=1000)
"""
df = pd.read_csv(input_file_path, header=None, names=['label', 'txt'])
sent_pairs = ['\t'.join(line_split[:2])
for line_split in df.txt.map(lambda x: x[:-1].split('. ')) if len(line_split) >= 2]
bt = | pd.DataFrame({'txt': sent_pairs}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import math
from copy import deepcopy
from sklearn.metrics import davies_bouldin_score as dbindex
class Reward_Function:
def __init__(self, max_dist):
self.max_dist = max_dist
def reward_function(self, df, k, total_data_size, obs, action, done):
reward = 0
centroids = self.gen_mean_coords(df, k)
num_clusters = len(df['action'].unique().tolist())
if done:
num_clusters = len(df['action'].unique().tolist())
num_in_clusters = df['action'].value_counts().to_list()
for i in range(k - num_clusters):
num_in_clusters.append(0)
max_val = np.prod(num_in_clusters)/total_data_size**k
if num_clusters == k:
accuracy = dbindex(df[df.columns.drop('action')], df['action'])
else:
accuracy = 1e+10
reward = - 2 * math.log10(accuracy) - \
k**(-k)/(1 + max_val)
else:
dist = np.linalg.norm(np.array(obs) -
np.array(centroids[action-1]))
accuracy = dist / self.max_dist
reward = 0
if reward > 100:
reward = 100
elif reward < -100:
reward = -100
return reward, accuracy
def gen_mean_coords(self, df, k):
centroids = []
for i in range(1, k+1):
temp_df = df[df['action'] == i]
temp_df = temp_df.drop(columns=['action'])
centroid = []
for col in temp_df.columns:
centroid.append(temp_df[col].mean())
centroids.append(centroid)
return centroids
def gen_table(self, coordinates, data):
df = deepcopy(data)
data = data.drop(columns=['action'])
dist = | pd.DataFrame() | pandas.DataFrame |
from unittest import TestCase
import sklearn_pmml_model
from sklearn_pmml_model.tree import PMMLTreeClassifier, PMMLTreeRegressor
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml import sklearn2pmml
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from io import StringIO
import pandas as pd
import numpy as np
from os import path, remove
from sklearn.datasets import load_digits
BASE_DIR = path.dirname(sklearn_pmml_model.__file__)
class TestTree(TestCase):
def test_invalid_tree(self):
with self.assertRaises(Exception) as cm:
PMMLTreeClassifier(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
</PMML>
"""))
assert str(cm.exception) == 'PMML model does not contain TreeModel.'
def test_fit_exception(self):
with self.assertRaises(Exception) as cm:
pmml = path.join(BASE_DIR, '../models/tree-iris.pmml')
clf = PMMLTreeClassifier(pmml=pmml)
clf.fit(np.array([[]]), np.array([]))
assert str(cm.exception) == 'Not supported.'
def test_unsupported_predicate(self):
with self.assertRaises(Exception) as cm:
PMMLTreeClassifier(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
<TreeModel splitCharacteristic="binarySplit">
<Node id="1">
<True/>
<Node id="2" score="setosa">
<UnsupportedPredicate/>
</Node>
<Node id="3" score="versicolor">
<UnsupportedPredicate/>
</Node>
</Node>
</TreeModel>
</PMML>
"""))
assert str(cm.exception) == 'Unsupported tree format: unknown predicate' \
' structure in Node 2'
def test_tree_threshold_value(self):
clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/tree-cat-pima.pmml'))
assert clf.tree_.threshold[0] == [0, 4]
assert np.allclose(clf.tree_.threshold[1:], [25.18735, -2, 125.5, -2, -2, 20.02033, -2, -2])
def test_more_tags(self):
clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/tree-cat-pima.pmml'))
assert clf._more_tags() == DecisionTreeClassifier()._more_tags()
class TestIrisTreeIntegration(TestCase):
def setUp(self):
pair = [0, 1]
data = load_iris(as_frame=True)
X = data.data
y = data.target
y.name = "Class"
self.test = (X, y)
pmml = path.join(BASE_DIR, '../models/tree-iris.pmml')
self.clf = PMMLTreeClassifier(pmml=pmml)
self.ref = DecisionTreeClassifier(random_state=1).fit(X, y)
def test_predict(self):
Xte, _ = self.test
assert np.array_equal(self.ref.predict(Xte), self.clf.predict(Xte))
def test_predict_proba(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
self.clf.predict_proba(Xte)
)
def test_score(self):
Xte, yte = self.test
assert self.ref.score(Xte, yte) == self.clf.score(Xte, yte)
def test_sklearn2pmml(self):
# Export to PMML
pipeline = PMMLPipeline([
("classifier", self.ref)
])
pipeline.fit(self.test[0], self.test[1])
sklearn2pmml(pipeline, "tree-sklearn2pmml.pmml", with_repr = True)
try:
# Import PMML
model = PMMLTreeClassifier(pmml='tree-sklearn2pmml.pmml')
# Verify classification
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
model.predict_proba(Xte)
)
finally:
remove("tree-sklearn2pmml.pmml")
class TestDigitsTreeIntegration(TestCase):
def setUp(self):
data = load_digits()
X = pd.DataFrame(data.data)
y = pd.Series(np.array(data.target_names)[data.target])
y.name = "Class"
X, Xte, y, yte = train_test_split(X, y, test_size=0.33, random_state=123)
self.test = (Xte, yte)
self.clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/tree-digits.pmml'))
self.ref = DecisionTreeClassifier(random_state=1).fit(X, y)
def test_predict(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict(Xte),
self.clf.predict(Xte)
)
def test_predict_proba(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
self.clf.predict_proba(Xte)
)
def test_score(self):
Xte, yte = self.test
assert self.ref.score(Xte, yte) == self.clf.score(Xte, yte)
class TestCategoricalTreeIntegration(TestCase):
def setUp(self):
self.clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/tree-cat.pmml'))
def test_predict(self):
Xte = np.array([[0], [1], [2]])
assert np.array_equal(
np.array(['class1', 'class2', 'class3']),
self.clf.predict(Xte)
)
class TestCategoricalPimaTreeIntegration(TestCase):
def setUp(self):
df = pd.read_csv(path.join(BASE_DIR, '../models/categorical-test.csv'))
cats = np.unique(df['age'])
df['age'] = | pd.Categorical(df['age'], categories=cats) | pandas.Categorical |
# -*- coding: utf-8 -*-
# /home/smokybobo/opt/repos/git/personal/loadlimit/test/unit/stat/test_tmp.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Tempy"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import asyncio
from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor
from functools import partial
# Third-party imports
import pandas as pd
from pandas import DataFrame, Series, Timestamp
import pytest
import uvloop
# Local imports
from loadlimit.core import BaseLoop
from loadlimit.event import NoEventTasksError, timedata, shutdown
from loadlimit.stat import timecoro
from loadlimit.util import aiter
# ============================================================================
# Globals
# ============================================================================
pytestmark = pytest.mark.usefixtures('testlogging')
# ============================================================================
# Helpers
# ============================================================================
class Period(defaultdict):
"""Store time series data by key"""
def __init__(self, *args, **kwargs):
super().__init__(list, *args, **kwargs)
self.numdata = 0
self.start_date = None
self.end_date = None
def total(self):
"""Calculate the total number of data points are stored"""
ret = sum(len(s) for slist in self.values()
for s in slist)
self.numdata = ret
return ret
async def atotal(self):
"""Async total calculator"""
ret = 0
async for slist in aiter(self.values()):
async for s in aiter(slist):
ret = ret + len(s)
self.numdata = ret
return ret
def dataframe(self, key, startind=0):
"""Create a dataframe from a stored list of series"""
slist = self[key]
index = list(range(startind, startind + len(slist)))
return DataFrame(slist, index=index)
def clearvals(self, key=None):
"""Clear list of given key
If key is None, clears list of all keys.
"""
if key is not None:
self[key] = []
else:
for key in self:
self[key] = []
self.numdata = 0
async def aclearvals(self, key=None):
"""Async version of clearvals()"""
if key is not None:
self[key] = []
else:
async for key in aiter(self):
self[key] = []
self.numdata = 0
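# Hedged usage sketch of Period (illustrative only, not part of the test):
# it behaves as a defaultdict(list) keyed by name, each value a list of Series.
#   p = Period()
#   p['delta'].append(Series([1.0, 2.0]))
#   p.total()               # -> 2 data points stored
#   p.dataframe('delta')    # one-row DataFrame built from the stored Series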
def hdf5_results(store, statsdict):
"""Create results from hdf5 store"""
# Dates
start = statsdict.start_date
end = statsdict.end_date
# Duration (in seconds)
duration = (end - start).total_seconds()
results = {}
index = ['Total', 'Median', 'Average', 'Min', 'Max', 'Rate']
ResultType = namedtuple('ResultType', [n.lower() for n in index])
for name in statsdict:
key = 'timeseries/{}'.format(name)
# Number of iterations
storeobj = store.get_storer(key)
numiter = storeobj.nrows
df = store[key]
delta = df['delta']
r = [numiter]
for val in [delta.median(), delta.mean(), delta.min(),
delta.max()]:
r.append(val.total_seconds() * 1000)
r.append(numiter / duration)
r = ResultType(*r)
results[name] = Series(r, index=index)
dfindex = list(sorted(results, key=lambda k: k))
vals = [results[v] for v in dfindex]
df = | DataFrame(vals, index=dfindex) | pandas.DataFrame |
#!/usr/bin/env python3
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import seaborn as sn
import column_names as cols
file_formats = ["pdf", "svg"]
def save(name):
if not os.path.isdir("figs"):
os.mkdir("figs")
for fmt in file_formats:
plt.savefig(
"figs/%s.%s" % (name, fmt),
bbox_inches='tight',
pad_inches=0,
transparent=True,
)
# entire community
df = pd.read_csv("data/spack-user-survey-2020-responses.csv")
# get reasonable column names
df.columns = [cols.description_to_name[c.strip()] for c in df.columns]
# just members of ECP
ecp = df[df.in_ecp == "Yes"]
#
# Are you part of ECP?
#
ax = df.in_ecp.value_counts().plot.pie(
figsize=(6,4),
fontsize=12,
autopct=lambda p: ("%.1f%%" % p) if p > 4 else "",
explode=[0.05] * 2,
ylabel='',
legend=False,
labels=[''] * 2,
pctdistance=0.7,
title=cols.names["in_ecp"],
textprops={'color':"w"}
)
ax.legend(loc="lower left", fontsize=12, bbox_to_anchor=(-.2, 0),
frameon=False, labels=["All", "ECP"])
save("pie_in_ecp")
#
# Pie charts
#
def two_pies(col, legend_cols=2, same=False):
"""Plot two pie charts to compare all responses with ECP responses.
Args:
col (str): name of column to compare
legend_cols (int): number of columns in the legend
same (bool): whether ECP results were pretty much the same as all (in
which case we omit the ECP-specific ones)
"""
plt.close()
combined = pd.DataFrame()
combined["All"] = df[col].value_counts()
if not same:
combined["ECP"] = ecp[col].value_counts()
axes = combined.plot.pie(
subplots=True,
layout=(1, 2),
figsize=(8, 8),
fontsize=12,
autopct=lambda p: ("%.1f%%" % p) if p > 4 else "",
explode=[0.05] * len(combined),
legend=False,
labels=[''] * combined.shape[0],
ylabel='',
pctdistance=0.7,
title=cols.names[col],
textprops={'color':"w"}
)
plt.tight_layout()
axes[0][0].set_title("All\n(ECP responses were similar)")
if not same:
axes[0][0].set_title("All")
axes[0][1].set_title("ECP")
axes[0][0].get_figure().subplots_adjust(top=1.3)
axes[0][0].legend(
ncol=legend_cols,
bbox_to_anchor=(0, 0),
loc="upper left",
labels=combined.index,
fontsize=12,
frameon=False,
)
save("two_pies_" + col)
two_pies("user_type")
two_pies("workplace")
two_pies("country", legend_cols=3)
two_pies("how_find_out")
two_pies("how_bad_no_py26", legend_cols=1)
two_pies("how_bad_only_py3", legend_cols=1)
two_pies("would_attend_workshop")
two_pies("did_tutorial")
two_pies("how_often_docs")
two_pies("commercial_support")
#
# Simple bar charts
#
def two_bars(col):
"""Plot two bar charts to compare all responses with ECP responses.
Args:
col (str): name of column to compare
"""
plt.close()
combined = pd.DataFrame()
combined["All"] = df[col].value_counts(sort=False)
combined["ECP"] = ecp[col].value_counts(sort=False)
axes = combined.plot.bar(
subplots=True,
layout=(1, 2),
figsize=(8, 3),
fontsize=12,
legend=False,
ylabel='',
xlabel="at least N years",
title=cols.names[col],
)
plt.tight_layout()
axes[0][0].set_title("All")
axes[0][1].set_title("ECP")
save("two_bars_" + col)
# not pie charts
two_bars("how_long_using")
#
# Multi-choice bar charts
#
def two_multi_bars(col, sort=None, index=None, filt=None, name=None,
figsize=(5, 4)):
"""Plot two bar charts to compare all responses with ECP responses.
Args:
col (str): name of column to compare
index (list): custom index for plot
filt (callable): optional function to filter column by
name (str): name for the figure
figsize (tuple): dimensions in inches for the figure
"""
if filt is None:
filt = lambda x: x
if name is None:
name = col
plt.close()
combined = pd.DataFrame(index=index)
split = filt(df[col].str.split(',\s+', expand=True))
combined["All"] = split.stack().value_counts()
combined["All"] /= df.shape[0]
combined["All"] *= 100
split = filt(ecp[col].str.split(',\s+', expand=True))
combined["ECP"] = split.stack().value_counts()
combined["ECP"] /= ecp.shape[0]
combined["ECP"] *= 100
if not index:
combined = combined.sort_values(by="All", ascending=True)
ax = combined.plot.barh(
figsize=figsize,
legend=True,
title=cols.names[col],
)
ax.legend(loc="lower right", fontsize=12, frameon=False)
plt.xlabel("Percent of respondents")
plt.tight_layout()
save("two_multi_bars_" + name)
two_multi_bars("app_area", figsize=(5, 5))
two_multi_bars("how_contributed")
two_multi_bars("spack_versions",
filt=lambda df: df.replace("Not sure. ", "do not know").replace(
"Do not know", "do not know"))
two_multi_bars("os", filt=lambda df: df.replace(
"Windows Subsystem for Linux (WSL)", "WSL"))
two_multi_bars("python_version",
index=['2.6', '2.7', '3.5', '3.6', '3.7', '3.8'])
two_multi_bars("how_use_pkgs", figsize=(6, 5), filt=lambda df: df.replace(
["Environment Modules (TCL modules)"], "TCL Modules"))
two_multi_bars(
"used_features",
filt=lambda df: df.replace(r' \([^)]*\)', '', regex=True).replace(
"Concretization preferences in packages.yaml",
"Concretization preferences"
).replace("Externals in packages.yaml", "External packages"),
figsize=(6, 5))
two_multi_bars("cpus_next_year")
two_multi_bars("gpus_next_year")
two_multi_bars("compilers_next_year", figsize=(7, 4))
two_multi_bars("how_get_help")
two_multi_bars(
"num_installations", index=reversed([
"1 - 10", "10 - 100", "100 - 200", "200 - 500", "500-1,000", "> 1,000"
]))
linuxes = [
"Gentoo", "Cray", "Amazon Linux", "Alpine", "TOSS", "Arch",
"Fedora", "SuSE", "Debian", "Ubuntu", "Red Hat", "CentOS",
]
def linuxize(df):
linux = df.replace(linuxes, "Linux").replace(
"Windows Subsystem for Linux (WSL)", "WSL")
is_duplicate = linux.apply(pd.Series.duplicated, axis=1)
return linux.where(~is_duplicate, None)
two_multi_bars("os", filt=linuxize, name="os_simple")
mods = ("Environment Modules (TCL modules)", "Lmod")
def modulize(df):
"""Add another column for "any module system"."""
has_modules = df.apply(lambda ser: ser.isin(mods).any(), axis=1)
mod_col = has_modules.apply(lambda c: "Modules (TCL or Lmod)" if c else None)
frame = pd.concat([df, mod_col], axis=1)
frame = frame.replace(["Environment Modules (TCL modules)"], "TCL Modules")
return frame
two_multi_bars("how_use_pkgs", filt=modulize, name="how_use_pkgs_any",
figsize=(6, 5))
gpus = ("NVIDIA", "AMD", "Intel")
def any_gpu(df):
"""Add another column for "any module system"."""
has_gpu = df.apply(lambda ser: ser.isin(gpus).any(), axis=1)
extra_col = has_gpu.apply(lambda c: "Any GPU" if c else None)
frame = pd.concat([df, extra_col], axis=1)
return frame
two_multi_bars("gpus_next_year", filt=any_gpu, name="gpus_next_year_any")
#
# Multi-choice bar charts
#
def feature_bar_chart(
df, title, name, feature_cols, ratings, xlabels, figsize, rot=25,
ha="right", ymax=None, colors=None):
# value counts for all columns
values = df[feature_cols].apply(
pd.Series.value_counts, sort=False).reindex(ratings).transpose()
ax = values.plot.bar(y=ratings, figsize=figsize, rot=0, color=colors)
ax.legend(ncol=5, labels=ratings, frameon=False)
plt.xticks(rotation=rot)
if ymax:
plt.ylim(0, ymax)
if xlabels:
ax.set_xticklabels(xlabels, ha=ha)
plt.tight_layout()
plt.title(title)
save("feature_bars_" + name)
def score_averages(df, feature_cols, ratings, weights):
"""Calculate average scores for features
Args:
df (DataFrame): data set
feature_cols (list): list of column names to average
ratings (list): values from the feature cols associated w/weights,
e.g. "bad", "ok", "good"
weights (dict): weights associated with ratings, e.g.,
{"bad": 0, "ok": 1, "good": 2}.
"""
values = df[feature_cols].apply(pd.Series.value_counts).reindex(ratings)
w = | pd.Series(weights, index=ratings) | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assert_index_equal(bf.columns, other.columns)
self.assert_index_equal(bf.index, other.index)
self.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assert_index_equal(bf.columns, self.frame.columns)
self.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
# TODO should reindex check_names?
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
            assert_frame_equal(result, expected)
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path = 'C:/Users/clain/Documents/Jupyter notes/Trading project/stocks.csv'
def create_df(start,end,symbols):
#Define date range
dates = pd.date_range(start,end)
#create empty DataFrame with dates as index
    df_empty = pd.DataFrame(index=dates)
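    # NOTE: the original function is truncated here. A plausible continuation
    # (assumed, not taken from the source) would join each symbol's prices from
    # the CSV at `path` onto the empty frame, roughly:
    #   prices = pd.read_csv(path, index_col='Date', parse_dates=True)
    #   for symbol in symbols:
    #       df_empty = df_empty.join(prices[[symbol]])
    #   return df_empty.dropna(how='all')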
"""-----------------------------------------------------------------------------
Name: Iterate CrowdED
Description: This class runs the crowdsourcing algorithm and reports the overall accuracy of a given experiment; it returns two outputs, the mean accuracy and the mean proportion
Created By: <NAME> (<EMAIL>.
Date: 30/01/18
-----------------------------------------------------------------------------"""
import sys
import os
import pandas as pd
import numpy as np
import crowded.method as cr
# coding: utf-8
class Iterate(object):
def __init__(self, variable):
self.variable = variable
def get_accuracy(self, total_tasks=100, total_workers=40, p_hard_t=0.1, p_good_w=0.9,
answers_key=["liver", "blood", "lung", "brain", "heart"],
p_train_t=0.4, workers_per_task=5):
accuracy, proportion = [], []
for i in range(5): #simulations
sys.stdout = open(os.devnull, "w")
try:
algorithm = cr.Compute(total_tasks, total_workers, p_hard_t, p_good_w, answers_key, p_train_t, workers_per_task)
except Exception:
pass
sys.stdout = sys.__stdout__
proportion.append(algorithm.accuracy()[0])
accuracy.append(algorithm.accuracy()[1])
proportion = np.mean(proportion)
accuracy = np.mean(accuracy)
return accuracy, proportion
def train_workers(self, max_value=100):
results = []
for idx, i in enumerate(np.arange(1, max_value, 1)):
try:
results.append((idx, self.get_accuracy(total_workers = i+1)))
except Exception:
pass
return results
def train_tasks(self, max_value=100):
results = []
for idx, i in enumerate(np.arange(1, max_value, 1)):
try:
results.append((idx, self.get_accuracy(total_tasks=i + 1)))
except Exception:
pass
return results
def train_workerspertask(self, max_value=100):
results = []
for idx, i in enumerate(np.arange(3, max_value, 2)):
try:
results.append((idx, self.get_accuracy(workers_per_task=i + 1)))
except Exception:
pass
return results
def train_proportion(self, max_value=1):
results = []
for idx, i in enumerate(np.arange(0, max_value, 0.01)):
try:
results.append((idx, self.get_accuracy(p_train_t = i + 0.1)))
except Exception:
pass
return results
def table(self, max_value=1, number_iterations = 1):
simulations = []
for k in range(number_iterations):
if self.variable == 'total_workers':
results = self.train_workers(max_value)
elif self.variable == 'total_tasks':
results = self.train_tasks(max_value)
elif self.variable == 'workers_per_task':
results = self.train_workerspertask(max_value)
elif self.variable == 'p_train_t':
results = self.train_proportion(max_value)
else:
print('Insert a correct variable name')
simulations.append((k, results))
        df_simulations = pd.DataFrame()
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoDeltasWarning,
NonNumpyField,
NonPipelineField,
)
from zipline.utils.numpy_utils import repeat_last_axis
from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_asset_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_asset_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
with_extra_sid = parameterized.expand(asset_infos)
class BlazeToPipelineTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.sids = sids = ord('A'), ord('B'), ord('C')
cls.df = df = pd.DataFrame({
'sid': sids * 3,
'value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
def test_tabular(self):
name = 'expr'
expr = bz.Data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertEqual(
{c.name: c.dtype for c in ds._columns},
{'sid': np.int64, 'value': np.float64},
)
for field in ('timestamp', 'asof_date'):
with self.assertRaises(AttributeError) as e:
getattr(ds, field)
self.assertIn("'%s'" % field, str(e.exception))
self.assertIn("'datetime'", str(e.exception))
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.Data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertEqual(value.dtype, np.float64)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.Data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""
var * {
sid: ?int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_auto_deltas(self):
expr = bz.Data(
{'ds': self.df,
'ds_deltas': pd.DataFrame(columns=self.df.columns)},
dshape=var * Record((
('ds', self.dshape.measure),
('ds_deltas', self.dshape.measure),
)),
)
loader = BlazeLoader()
ds = from_blaze(expr.ds, loader=loader)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
def test_auto_deltas_fail_warn(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule='warn',
)
self.assertEqual(len(ws), 1)
w = ws[0].message
self.assertIsInstance(w, NoDeltasWarning)
self.assertIn(str(expr), str(w))
def test_auto_deltas_fail_raise(self):
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule='raise',
)
self.assertIn(str(expr), str(e.exception))
def test_non_numpy_field(self):
expr = bz.Data(
[],
dshape="""
var * {
a: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(object.__getattribute__(ds, 'a'), NonNumpyField)
def test_non_pipeline_field(self):
# NOTE: This test will fail if we ever allow string types in
# the Pipeline API. If this happens, change the dtype of the `a` field
# of expr to another type we don't allow.
expr = bz.Data(
[],
dshape="""
var * {
a: string,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def test_complex_expr(self):
expr = bz.Data(self.df, dshape=self.dshape)
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# Test that we can have complex expressions with no deltas
from_blaze(
expr_with_add,
deltas=None,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
loader=self.garbage_loader,
)
deltas = bz.Data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
)
with self.assertRaises(TypeError):
from_blaze(
expr_with_add,
deltas=deltas,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1,
deltas=deltas,
loader=self.garbage_loader,
)
def test_id(self):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
with tmp_asset_finder() as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id_macro_dataset(self):
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
asset_info = asset_infos[0][0]
with tmp_asset_finder(asset_info) as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
nassets = len(asset_info)
expected = pd.DataFrame(
list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
index=pd.MultiIndex.from_product((
self.macro_df.timestamp,
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
assert_frame_equal(result, expected, check_dtype=False)
def _run_pipeline(self,
expr,
deltas,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn):
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
loader=loader,
no_deltas_rule='raise',
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(data, expected_views[today])
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
expected_output,
check_dtype=False,
)
@with_extra_sid
def test_deltas(self, asset_info):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
deltas = bz.Data(self.df, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
asset_info = asset_infos[0][0]
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.Data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
'2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
})
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.sids * 2,
'value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.Data(baseline, name='expr', dshape=self.dshape)
deltas = bz.Data(baseline, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
if len(asset_info) == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan, np.nan]],
expected_views,
)
expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
else:
expected_output_buffer = [10, 11, 12, 11, 12, 13]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
expected_output_buffer,
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
)
def test_novel_deltas_macro(self):
asset_info = asset_infos[0][0]
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-04')
# Library
import pandas as pd
import numpy as np
import datetime as dt
import time,datetime
import math
from math import sin, asin, cos, radians, fabs, sqrt
from geopy.distance import geodesic
from numpy import NaN
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
import sklearn
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from IPython.display import Image
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
import random
from sklearn.ensemble import RandomForestClassifier
import eli5
from eli5.sklearn import PermutationImportance
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,recall_score,f1_score,roc_auc_score,roc_curve
import sys
import pymysql  # used by get_market_data() below
EARTH_RADIUS=6371
# Common Utilities
def num2date(num):
# Convert eventid in GTD to standard time format
num = str(num)
d = num[:4]+'/'+num[4:6]+'/'+num[6:8]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def num2date_(num):
# Convert time of market data to standard time format
num = str(num)
d = num[:4]+'/'+num[5:7]+'/'+num[8:10]
tmp = dt.datetime.strptime(d, '%Y/%m/%d').date()
return tmp
def get_week_day(date):
day = date.weekday()
return day
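# Illustrative sketch (assumed usage, not part of the original script): the two
# converters accept GTD event ids (YYYYMMDD...) and ISO-style date strings.
def _demo_date_helpers():
    print(num2date(200401150001))                # 2004-01-15, taken from the first 8 digits
    print(num2date_('2004-01-15'))               # same date parsed from a string
    print(get_week_day(num2date('20040115')))    # 3 -> Thursday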
def hav(theta):
s = sin(theta / 2)
return s * s
def get_distance_hav(lat0, lng0, lat1, lng1):
# The distance between two points of a sphere is calculated by the haversine formula
# Longitude and latitude convert to radians
lat0 = radians(lat0)
lat1 = radians(lat1)
lng0 = radians(lng0)
lng1 = radians(lng1)
dlng = fabs(lng0 - lng1)
dlat = fabs(lat0 - lat1)
h = hav(dlat) + cos(lat0) * cos(lat1) * hav(dlng)
distance = 2 * EARTH_RADIUS * asin(sqrt(h))
return distance
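# Illustrative sketch (assumed usage, not part of the original script): sanity-check
# the haversine helper against geopy's geodesic distance; the coordinates below
# (Jerusalem vs Tel Aviv, roughly 54 km apart) are chosen only for demonstration.
def _demo_haversine_check():
    d_hav = get_distance_hav(31.7683, 35.2137, 32.0853, 34.7818)
    d_geo = geodesic((31.7683, 35.2137), (32.0853, 34.7818)).km
    print("haversine: %.1f km, geodesic: %.1f km" % (d_hav, d_geo))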
# Load the population density data - https://sedac.ciesin.columbia.edu/data/set/spatialecon-gecon-v4
def load_eco(filename,country):
basic_ec_file1 = filename
basic_ec = pd.read_excel(basic_ec_file1, country,header=0) # Load the page of Israel
lonlat_list = []
for i in range(basic_ec.shape[0]):
temp = []
temp.append(basic_ec.iloc[i]['LONGITUDE'])
temp.append(basic_ec.iloc[i]['LAT'])
lonlat_list.append(temp)
    return basic_ec, lonlat_list  # main() also needs the raw sheet (LAT/LONGITUDE/AREA/POPGPW columns)
# Make terrorist attack features
def gtd_one_hot(gtd):
# Group the features at daily level
gtd_grouped = gtd.groupby(gtd['Timestamp']).sum()
# Occurrence measure
gtd_grouped['occur_count'] = gtd.groupby(gtd['Timestamp']).size()
# Maintain the max nightlight value each day
gtd_grouped['nightlight'] = gtd.groupby(gtd['Timestamp'])['nightlight'].max()
# Obtain the weekday of certain timestamp
gtd_grouped['week'] = gtd.groupby(gtd['Timestamp'])['week'].mean()
return gtd_grouped
def lag(df,col_name,count):
# Shift the column
for i in range(1,count+1):
df[col_name + '_' + str(i)] = df[col_name].shift(i)
return df
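# Illustrative sketch (assumed usage, not part of the original script): `lag` adds
# shifted copies of a column so each row also carries the previous `count` values.
def _demo_lag_usage():
    demo = pd.DataFrame({'occur_count': [0, 2, 1, 0]})
    demo = lag(demo, 'occur_count', 2)
    # columns are now: occur_count, occur_count_1 (t-1), occur_count_2 (t-2)
    print(demo)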
def compute_nl(lon,lat):
# Map certain geographic position to corresponding value of nightlight intensity
round_lon = round((lon+180)*37.5)
round_lat = 6750-round((lat+90)*37.5)
try:
return nl[int(round_lat)][int(round_lon)]
except:
return 0
def contain_or_not(string,list_):
if string in list_:
return 1
else:
return 0
def adjust_week(timestamp,week):
    # Shift events on Saturday/Sunday (weekday 5/6) to the following Monday, the next trading day
if week == 5:
return (timestamp+datetime.timedelta(days=2)).strftime("%Y/%m/%d")
elif week == 6:
return (timestamp+datetime.timedelta(days=1)).strftime("%Y/%m/%d")
return timestamp.strftime("%Y/%m/%d")
# Make the market features
def get_market_data(start,end,ref,goal,host,user,password,db):
con = pymysql.connect(host,user,password,db, charset='utf8' )
# Reference Index
cmd1 = "select * from " + ref + " where Timestamp >= " + start + ' and Timestamp <= ' + end
ref_df = pd.read_sql(cmd1, con)
#Goal Index
cmd2 = "select * from " + goal + " where Timestamp >= " + start + ' and Timestamp <= ' + end
goal_df = pd.read_sql(cmd2, con)
return ref_df,goal_df
def get_diff(origin_md,name):
md = origin_md.copy()
str1 = 'logdiff_' + name
str2 = 'twologdiff_' + name
md['close_shift1'] = md['Trade Close'].shift(1)
md['onediff'] = md['Trade Close'].diff()
md['open_shift_minus1'] = md['Trade Open'].shift(-1)
md['twodiff'] = md['open_shift_minus1']-md['close_shift1']
md = md.dropna()
md[str1] = md['onediff']/md['close_shift1'] < 0
md[str2] = md['twodiff']/md['close_shift1'] < 0
md_onediff = pd.DataFrame(md,columns = ['Timestamp',str1]).dropna()
md_twodiff = pd.DataFrame(md,columns = ['Timestamp',str2]).dropna()
return md_onediff,md_twodiff
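# Illustrative sketch (assumed usage, not part of the original script): get_diff flags
# days where the close-to-close move (onediff) and the previous-close-to-next-open
# move (twodiff) are negative; the toy prices below are made up.
def _demo_get_diff():
    md = pd.DataFrame({'Timestamp': pd.date_range('2015-01-01', periods=4),
                       'Trade Open': [100, 101, 99, 98],
                       'Trade Close': [101, 100, 98, 99]})
    md_onediff, md_twodiff = get_diff(md, 'demo')
    print(md_onediff)  # logdiff_demo is True on days the close fell versus the prior close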
# Merge terrorist attack features and market features
def diff_merge(gtd_grouped, diff_list):
    # Merge every market diff table on Timestamp, then attach the attack features
    diff_feature = diff_list[0]
    for i in range(1, len(diff_list)):
        diff_feature = pd.merge(diff_feature, diff_list[i], on='Timestamp')
    diff_feature = diff_feature.dropna()
    diff_feature = pd.merge(gtd_grouped, diff_feature, on='Timestamp', how='right')
    return diff_feature
def lag_part(feature,lag_features,lag_numbers):
for i in range(len(lag_features)):
feature = lag(feature,lag_features[i],lag_numbers[i])
return feature
def reset_df(df,target_col,index):
cols = list(df)
cols.insert(index, cols.pop(cols.index(target_col)))
df = df.loc[:, cols]
return df
def final_process(gtd_grouped,diff_list,lag_features,lag_numbers,target_col,future_drop_col):
feature = diff_merge(gtd_grouped,diff_list)
feature.sort_values("Timestamp",inplace=True)
feature = feature.fillna(0)
feature = lag_part(feature,lag_features,lag_numbers)
feature = reset_df(feature,target_col,len(feature.columns.values)-1)
feature = feature.drop(future_drop_col,axis=1)
feature.rename(columns={target_col: 'target'}, inplace = True)
feature = feature.dropna()
return feature
def train_test(features,split_point):
y = list(features['target'])
X = features.drop(['target','Timestamp'],axis=1)
x = X.values
var_list = list(X)
X_train,X_test,Y_train,Y_test = x[:split_point],x[split_point:],y[:split_point],y[split_point:]
return X_train,X_test,Y_train,Y_test,var_list
def pr(y_test, y_pred):
    # Precision/recall for the True ("market falls") class; all callers below
    # expect the pair (precision, recall).
    realminus = 0
    predminus = 0
    correct = 0
    for ii in range(len(y_test)):
        if y_test[ii] == True:
            realminus += 1
        if y_pred[ii] == True:
            predminus += 1
        if y_test[ii] == True and y_pred[ii] == True:
            correct += 1
    if predminus == 0:
        precision = 1
    else:
        precision = correct / predminus
    recall = correct / realminus if realminus > 0 else 0
    if recall == 0:
        precision, recall = 1, 0
    return precision, recall
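# Illustrative sketch (assumed usage, not part of the original script): precision and
# recall of the "market falls" (True) class on a toy prediction vector.
def _demo_pr_usage():
    y_true = [True, False, True, False]
    y_hat = [True, True, False, False]
    precision, recall = pr(y_true, y_hat)
    print(precision, recall)  # 0.5 0.5 for this toy example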
def split_pos_neg(feature, y_pred, cut_point):
    # Display the performance separately for days with and days without terrorist attacks
    testset = feature[cut_point:].copy()
testset = testset.reset_index()
pred_content = pd.Series(y_pred)
testset['pred'] = pred_content
testset1 = testset[(testset['occur_count'] >= 1)]
y_pred_ = list(testset1['pred'])
y_test_ = list(testset1['target'])
precision, recall = pr(y_test_,y_pred_)
f1 = 2*(precision*recall)/(precision+recall)
print(precision, ' ',recall,' ',f1)
print(classification_report(y_test_,y_pred_))
testset1 = testset[(testset['occur_count'] == 0)]
y_pred_ = list(testset1['pred'])
y_test_ = list(testset1['target'])
precision, recall = pr(y_test_,y_pred_)
f1 = 2*(precision*recall)/(precision+recall)
print(precision, ' ',recall,' ',f1)
print(classification_report(y_test_,y_pred_))
def best_para(x_train,x_val,y_train,y_val):
mf1=0
mins=0
maxd=0
for j in range(5,10):
for i in range(15,32):
clf = tree.DecisionTreeClassifier(min_samples_leaf = i,max_depth = j)
clf.fit(x_train, y_train)
            y_pred = clf.predict(x_val)
precision, recall = pr(y_val,y_pred)
f1 = 2*(precision*recall)/(precision+recall)
if(f1>mf1):
mf1=f1
mins=i
maxd=j
return mf1,mins,maxd
def train_dt(feature, cut_point, samples_leaf=1, depth=100):
    # Fit a decision tree on the training period and report metrics on the test period
    x_train, x_test, y_train, y_test, var_list = train_test(feature, cut_point)
    clf = tree.DecisionTreeClassifier(min_samples_leaf=samples_leaf, max_depth=depth)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    y_pred_pre = clf.predict_proba(x_test)
    print(classification_report(y_test, y_pred))
    im = clf.feature_importances_
    print(im)
    precision, recall = pr(y_test, y_pred)
    f1 = 2 * (precision * recall) / (precision + recall)
    print(precision, ' ', recall, ' ', f1)
    split_pos_neg(feature, y_pred, cut_point)
def experment_full_sample(feature, cut_point, val_cut_point):
    # Market Only Baseline - Exp-FS
    test = pd.DataFrame(feature, columns=['Timestamp', 'logdiff_sp500', 'logdiff_sp500_1', 'twologdiff_is100_1', 'twologdiff_is100_2', 'logdiff_sp500_2', 'target'])
    train_dt(test, cut_point)
    # Exp-FS: tune the tree on a validation split of the training period, then retrain
    # (val_cut_point marks the end of the inner training split used for tuning)
    x_train, x_val, y_train, y_val, var_list = train_test(feature[:cut_point], val_cut_point)
    _, mins, maxd = best_para(x_train, x_val, y_train, y_val)
    train_dt(feature, cut_point, samples_leaf=mins, depth=maxd)
def experment_terr(feature, cut_point):
    # Exp-Terr: keep only days with at least one attack
    feature_ = feature.copy()
    feature_ = feature_[(feature_['occur_count'] >= 1)]
    val_cut_point_terr = 320
    cut_point_terr = 415
    # Market Only Baseline - Exp-Terr
    test = pd.DataFrame(feature_, columns=['Timestamp', 'logdiff_sp500', 'logdiff_sp500_1', 'twologdiff_is100_1', 'twologdiff_is100_2', 'logdiff_sp500_2', 'target'])
    train_dt(test, cut_point_terr)
    # Exp-Terr: tune the tree on a validation split of the training period, then retrain
    x_train, x_val, y_train, y_val, var_list = train_test(feature_[:cut_point_terr], val_cut_point_terr)
    _, mins, maxd = best_para(x_train, x_val, y_train, y_val)
    train_dt(feature_, cut_point_terr, samples_leaf=mins, depth=maxd)
def one_step_ahead(feature):
# One step ahead - Need to load the terrorist attack data extract from news since startdate
# Merge GTD data prior to that startdata and terrorist attack data extract from news since startdate
gtd_news = pd.read_excel('reuters.xlsx','Israel')
## rechange the load GTD data part
gtd = gtd_original[gtd_original['country'] == 97]
gtd = gtd[gtd['iday']!=0]
gtd['Timestamp'] = gtd['eventid'].apply(num2date)
gtd = gtd[['Timestamp','latitude','longitude','nkill','nwound','city','provstate']]
gtd = gtd.dropna()
startdate = '2007-01-01'
gtd = gtd[gtd['Timestamp'] < dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
gtd_news['Timestamp'] = gtd_news['Timestamp'].apply(num2date_)
gtd = pd.concat([gtd,gtd_news])
feature_all = feature.copy()
feature = feature[feature['occur_count'] != 0]
startdate = '2007-01-01'
feature_train = feature[feature['Timestamp'] < dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
feature_test = feature[feature['Timestamp'] >= dt.datetime.strptime(startdate, '%Y-%m-%d').date()]
test_time = list(feature_test['Timestamp'])
# Market-only baseline and full-feature version for one-step-ahead
fall_count = 0
fall_predict_true = 0
fall_predict_count = 0
fall_predict_count_true = 0
for i in range(len(test_time)):
train_set = pd.concat([feature_train[-feature_train.shape[0]+i:], feature_test[0:i]])
test_set = feature_test[i:i+1]
test_set = test_set.drop([], 1)
# market-only version
# x_train,x_test,y_train,y_test,var_list_market = train_test(train_set[['Timestamp','logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2','target']],train_set.shape[0])
# full-feature version
x_train,x_test,y_train,y_test,var_list = train_test(train_set,train_set.shape[0])
time = str((test_set['Timestamp'].values)[0])
y = list(test_set['target'])
# market-only version
# X = test_set[['logdiff_sp500','logdiff_sp500_1','twologdiff_is100_1','twologdiff_is100_2','logdiff_sp500_2']]
# full-feature version
X = test_set.drop(['target','Timestamp'],axis=1)
# market-only version
# clf = tree.DecisionTreeClassifier()
# full-feature version
clf = tree.DecisionTreeClassifier(min_samples_leaf = 26)
clf.fit(x_train, y_train)
y_pred = clf.predict(X)
if y == [1]:
fall_count += 1
if y_pred == [1]:
fall_predict_true += 1
if y_pred == [1]:
fall_predict_count += 1
if y == [1]:
fall_predict_count_true += 1
plusprecision = fall_predict_count_true/fall_predict_count
plusrecall = fall_predict_true/fall_count
f1 = 2*(plusprecision*plusrecall)/(plusprecision+plusrecall)
print(plusprecision,' ',plusrecall,' ',f1)
print(fall_predict_count_true,' ',fall_predict_count,' ',fall_predict_true,' ',fall_count)
def main(argv):
# Load the population density data - https://sedac.ciesin.columbia.edu/data/set/spatialecon-gecon-v4
    basic_ec, lonlat_list = load_eco('basic_eco.xls', "Israel")
    # Load the nightlight data - https://eoimages.gsfc.nasa.gov/images/imagerecords/144000/144897/BlackMarble_2016_3km_gray_geo.tif
    # compute_nl() and one_step_ahead() read these tables as module-level names
    global nl, gtd_original
    gray_file = open("nightlight.csv", "rb")
    nl_tmp = np.loadtxt(gray_file, delimiter=',', skiprows=0)
    gray_file.close()
    nl = np.array(nl_tmp)
    # Load the GTD data - https://www.start.umd.edu/gtd/
    gtd_original = pd.read_excel('gtd90_17.xlsx')
gtd = gtd_original[gtd_original['country'] == 97]
gtd = gtd[gtd['iday']!=0]
gtd['Timestamp'] = gtd['eventid'].apply(num2date)
gtd = gtd[['Timestamp','latitude','longitude','nkill','nwound','city','provstate']]
gtd = gtd.dropna()
# capital/cultural center/religious center labels - From Wikipedia
capital = ['Jerusalem','Nazareth','Haifa','Ramla','Tel Aviv','Beersheva']
cultural_center = ['Tel Aviv']
religious_center = ['Jerusalem']
gtd['capital'] = gtd['city'].apply(contain_or_not,args=(capital,))
gtd['cultural_center'] = gtd['city'].apply(contain_or_not,args=(cultural_center,))
gtd['religious_center'] = gtd['city'].apply(contain_or_not,args=(religious_center,))
# One-hot encoding of provstate
gtd = gtd.join(pd.get_dummies(gtd.provstate))
gtd['week'] = gtd['Timestamp'].apply(get_week_day)
gtd['Timestamp'] = gtd.apply(lambda x :adjust_week(x['Timestamp'],x['week']),axis=1)
gtd['Timestamp'] = gtd['Timestamp'].apply(num2date_)
gtd['week'] = gtd['Timestamp'].apply(get_week_day)
gtd['nightlight'] = gtd.apply(lambda row: compute_nl(row['longitude'], row['latitude']), axis=1)
basic_ec[['LAT']] = basic_ec[['LAT']].apply(pd.to_numeric)
basic_ec[['LONGITUDE']] = basic_ec[['LONGITUDE']].apply(pd.to_numeric)
gtd = gtd.reset_index(drop=True)
add_feature = ['POPGPW_2005_40']
gtd = pd.concat([gtd, pd.DataFrame(columns=add_feature)])
for i in range(gtd.shape[0]):
distance = []
lon = gtd.iloc[i]['longitude']
lat = gtd.iloc[i]['latitude']
for j in range(basic_ec.shape[0]):
distance.append(geodesic((lonlat_list[j][1],lonlat_list[j][0]), (lat,lon)))
min_index = distance.index(min(distance))
for j in range(len(add_feature)):
# Calculate the population density
gtd.loc[i,add_feature[j]] = float(basic_ec.iloc[min_index][add_feature[j]]/basic_ec.iloc[min_index]['AREA'])
gtd[add_feature] = gtd[add_feature].apply(pd.to_numeric)
keep_geo = gtd.groupby('Timestamp').first()
gtd_grouped = gtd_one_hot(gtd)
gtd_grouped = gtd_grouped.reset_index('Timestamp')
gtd_grouped['longitude'] = pd.Series(list(keep_geo['longitude']))
gtd_grouped['latitude'] = pd.Series(list(keep_geo['latitude']))
# In order to take normal day into account from 1989/12/31 to 2018/01/01
b = dt.datetime.strptime('1989/12/31', '%Y/%m/%d').date()
ind = []
vi = []
for x in range(12000):
b += pd.Timedelta(days = 1)
if b == dt.datetime.strptime('2018/01/01', '%Y/%m/%d').date():
break
if get_week_day(b) == 5 or get_week_day(b) == 6:
continue
ind.append(b)
vi.append(1)
ts = pd.Series(vi, index = ind)
dict_ts = {'Timestamp':ts.index}
df_day = pd.DataFrame(dict_ts)
    gtd_grouped = pd.merge(gtd_grouped, df_day, how='right')
import os
import re
import numpy as np
import pandas as pd
from typing import List, Dict, Sized
from scipy.special import comb
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from scipy.signal import medfilt
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import warnings
import matplotlib.pyplot as plt
import ast
import math
from numpy.lib.stride_tricks import as_strided
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=UserWarning, message='Variables are collinear')
__all__ = ["convert_types_in_dict", "moving_window_stride", "window_trapezoidal",
"Record", "split", "record_filter", "filter_transitions", "filter_smart", "filter_recognition",
"vgg_filter",
"data_per_id", "data_per_id_and_date", "all_data_per_id", "prepare_data", "prepare_force_data",
"normalized_confusion_matrix", "plot_confusion_matrix", "StandardScalerPerFeature",
"prepare_pipeline", "normalise_force_data"]
def convert_types_in_dict(xml_dict):
"""
Evaluates all dictionary entries into Python literal structure, as dictionary read from XML file is always string.
If value can not be converted it passed as it is.
:param xml_dict: Dict - Dictionary of XML entries
:return: Dict - Dictionary with converted values
"""
out = {}
for el in xml_dict:
try:
out[el] = ast.literal_eval(xml_dict[el])
except ValueError:
out[el] = xml_dict[el]
return out
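# Illustrative sketch (assumed usage, not part of the original module): values read
# from an XML file arrive as strings and are evaluated back into Python literals;
# anything that is not a literal (e.g. a device name) is kept as a string.
def _demo_convert_types():
    raw = {"fs": "5120", "gain": "1.5", "channels": "[1, 2, 3]", "device": "MyoBand"}
    parsed = convert_types_in_dict(raw)
    print(parsed)  # {'fs': 5120, 'gain': 1.5, 'channels': [1, 2, 3], 'device': 'MyoBand'}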
def moving_window_stride(array, window, step):
"""
Returns view of strided array for moving window calculation with given window size and step
:param array: numpy.ndarray - input array
:param window: int - window size
:param step: int - step lenght
:return: strided: numpy.ndarray - view of strided array, index: numpy.ndarray - array of indexes
"""
stride = array.strides[0]
win_count = math.floor((len(array) - window + step) / step)
strided = as_strided(array, shape=(win_count, window), strides=(stride*step, stride))
index = np.arange(window - 1, window + (win_count-1) * step, step)
return strided, index
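# Illustrative sketch (assumed usage, not part of the original module): a window of
# 4 samples advanced by 2 over a 10-sample signal gives 4 windows whose last-sample
# indices are 3, 5, 7 and 9.
def _demo_moving_window():
    sig = np.arange(10, dtype=float)
    windows, idx = moving_window_stride(sig, 4, 2)
    print(windows.shape)  # (4, 4)
    print(idx)            # [3 5 7 9]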
def window_trapezoidal(size, slope):
"""
Return trapezoidal window of length size, with each slope occupying slope*100% of window
:param size: int - window length
:param slope: float - trapezoid parameter, each slope occupies slope*100% of window
:return: numpy.ndarray - trapezoidal window
"""
if slope > 0.5:
slope = 0.5
if slope == 0:
return np.full(size, 1)
else:
return np.array([1 if ((slope * size <= i) & (i <= (1-slope) * size)) else (1/slope * i / size) if (i < slope * size) else (1/slope * (size - i) / size) for i in range(1, size + 1)])
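# Illustrative sketch (assumed usage, not part of the original module): a 10-sample
# trapezoidal window with 20% slopes ramps up, stays flat, then ramps down.
def _demo_trapezoidal_window():
    w = window_trapezoidal(10, 0.2)
    plt.plot(w)
    plt.title("Trapezoidal window, slope=0.2")
    plt.show()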
class Record:
def __init__(self, path: str = None):
self.path: str = ""
self.type: str = ""
self.id: str = ""
self.trajectory: str = ""
self.date: str = ""
self.time: str = ""
        if path:
self.set_path(path)
def set_path(self, path: str):
experiment_name_regexp = r"^(?P<type>\w*)-(?P<id>\d{2})-(?P<trajectory>\w*)-" \
r"(?P<date>\d{4}-\d{2}-\d{2})-(?P<time>\d{2}-\d{2}-\d{2}-\d{3})"
basename = os.path.basename(path)
tags = re.search(experiment_name_regexp, basename)
if not tags:
raise Warning("Wrong record", path)
else:
self.path = path
self.type = tags.group('type')
self.id = tags.group('id')
self.trajectory = tags.group('trajectory')
self.date = tags.group('date')
self.time = tags.group('time')
def print(self):
for key in self.__dict__.keys():
print(key, "=", self.__dict__[key])
def __repr__(self):
return "-".join([self.type, self.id, self.trajectory, self.date, self.time])
def __str__(self):
return "-".join([self.type, self.id, self.trajectory, self.date, self.time])
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return isinstance(self, type(other)) and self.__repr__() == other.__repr__()
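# Illustrative sketch (assumed usage, not part of the original module): a Record
# parses its metadata out of a file name following the
# <type>-<id>-<trajectory>-<date>-<time> convention; the file name below is hypothetical.
def _demo_record_parsing():
    r = Record("emg-01-sequential-2018-03-15-12-30-45-123.hdf5")
    print(r.id, r.trajectory, r.date)  # 01 sequential 2018-03-15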
def split(items: Sized, n_splits=None, test_size=0.1, train_size=None, random_state=None):
rng1 = np.random.RandomState(random_state)
if test_size is None and train_size is None:
raise ValueError("Missing test size or train size")
data_count = len(items)
items = set(range(data_count))
if isinstance(train_size, float):
train_size = np.rint(train_size * data_count)
if isinstance(test_size, float):
test_size = np.rint(test_size * data_count)
if train_size is None:
train_size = data_count - test_size
if test_size is None:
test_size = data_count - train_size
train_size = int(train_size)
test_size = int(test_size)
if train_size < 1 or train_size > (data_count - 1):
raise ValueError("Wrong train size: train_size={:d},test_size={:d} out of {:d}".
format(train_size, test_size, data_count))
if test_size < 1 or test_size > (data_count - 1):
raise ValueError("Wrong test size: train_size={:d},test_size={:d} out of {:d}".
format(train_size, test_size, data_count))
n_comb = int(comb(data_count, train_size) * comb(data_count - train_size, test_size))
if n_splits is None:
n_splits = n_comb
if n_splits > n_comb:
warnings.warn("n_splits larger than available ({:d}/{:d})".format(n_splits, n_comb))
n_splits = n_comb
splits = []
while len(splits) < n_splits:
items_train = rng1.choice(list(items), size=train_size, replace=False)
items_left = items.copy()
for it in items_train:
items_left.remove(it)
items_test = rng1.choice(list(items_left), size=test_size, replace=False)
split_candidate = (set(items_train), set(items_test))
if split_candidate not in splits:
splits.append(split_candidate)
return splits
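# Illustrative sketch (assumed usage, not part of the original module): draw three
# reproducible train/test partitions of six recording dates, four for training and
# two for testing; the returned index sets refer to positions in the input list.
def _demo_split_usage():
    days = ["d0", "d1", "d2", "d3", "d4", "d5"]
    for train_idx, test_idx in split(days, n_splits=3, test_size=2, train_size=4, random_state=0):
        print(sorted(train_idx), sorted(test_idx))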
def record_filter(records: List[Record], whitelists: Dict[str, List] = None, blacklists: Dict[str, List] = None):
filtered_records: List[Record] = []
if whitelists is None:
whitelists = {}
if blacklists is None:
blacklists = {}
for r in records:
keep: bool = True
for w_key, w_values in whitelists.items():
if getattr(r, w_key) not in w_values:
keep = False
break
if keep:
for b_key, b_values in blacklists.items():
if getattr(r, b_key) in b_values:
keep = False
break
if keep:
filtered_records.append(r)
return filtered_records
def filter_transitions(trajectory: np.ndarray,
start_before: int = 0, start_after: int = 0,
end_before: int = 0, end_after: int = 0,
pause_before: int = 0, pause_after: int = 0):
trajectory_nan = trajectory.astype('float')
np.putmask(trajectory_nan, trajectory_nan < 0, np.nan)
diffs = np.concatenate(([0], np.diff(trajectory_nan)))
np.putmask(diffs, np.isnan(diffs), 0)
filtered = trajectory.copy()
mask = np.full(trajectory.shape, False)
if start_before > 0:
start_before_mask = np.logical_and(diffs != 0, trajectory > 0)
if start_before > 1:
start_before_mask = binary_dilation(start_before_mask, structure=np.array([1, 1, 0]),
iterations=start_before-1)
mask = np.logical_or(mask, start_before_mask)
if start_after > 0:
start_after_mask = np.logical_and(diffs != 0, trajectory > 0)
start_after_mask = binary_dilation(start_after_mask, structure=np.array([1, 0, 0])) # shift left
if start_after > 1:
start_after_mask = binary_dilation(start_after_mask, structure=np.array([0, 1, 1]),
iterations=start_after-1)
mask = np.logical_or(mask, start_after_mask)
if end_before > 0:
end_before_mask = np.logical_and(diffs != 0, trajectory == 0)
if end_before > 1:
end_before_mask = binary_dilation(end_before_mask, structure=np.array([1, 1, 0]),
iterations=end_before-1)
mask = np.logical_or(mask, end_before_mask)
if end_after > 0:
end_after_mask = np.logical_and(diffs != 0, trajectory == 0)
end_after_mask = binary_dilation(end_after_mask, structure=np.array([1, 0, 0])) # shift left
if end_after > 1:
end_after_mask = binary_dilation(end_after_mask, structure=np.array([0, 1, 1]),
iterations=end_after-1)
mask = np.logical_or(mask, end_after_mask)
filtered[mask] = -5
pause_mask = trajectory == -1
if pause_before > 0:
pause_mask = binary_dilation(pause_mask, structure=np.array([1, 1, 0]), iterations=pause_before)
if pause_after > 0:
pause_mask = binary_dilation(pause_mask, structure=np.array([0, 1, 1]), iterations=pause_after)
filtered[pause_mask] = -6
return filtered
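# Sketch: frames around gesture starts/ends are overwritten with -5 and frames around
# pauses (-1) with -6, so they can be excluded from later evaluation, e.g.
#   cleaned = filter_transitions(trajectory, start_before=2, start_after=2,
#                                end_before=2, end_after=2, pause_before=1, pause_after=1)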
def filter_smart(recognized: np.ndarray, trajectory: np.ndarray,
recognition_median_filter: int = 5,
recognition_tolerance_backward: int = 8,
recognition_tolerance_forward: int = 1,
min_idle_period: int = 7):
trajectory_length = len(recognized)
recognized_median = medfilt(recognized, recognition_median_filter)
idle_mask = binary_erosion(recognized_median <= 0, iterations=int(min_idle_period/2))
idle_mask = binary_dilation(idle_mask, iterations=int(min_idle_period/2))
transitions = np.concatenate(([0], np.diff(idle_mask) != 0))
starts_mask = np.logical_and(transitions, recognized_median != 0)
starts = np.flatnonzero(starts_mask)
ends_mask = np.logical_and(transitions, recognized_median == 0)
ends = np.flatnonzero(ends_mask)
output = np.full(recognized.shape, -4)
np.putmask(output, idle_mask, 0)
np.putmask(output, recognized < 0, recognized.astype('int32'))
for i, s in enumerate(starts.tolist()):
t_idx = np.searchsorted(ends, s+1)
if t_idx < len(ends):
t = ends[t_idx]
else:
t = trajectory_length-1
trajectory_s = max(0, s-recognition_tolerance_backward)
trajectory_t = min(t+recognition_tolerance_forward, trajectory_length-1)
gesture = np.median(recognized[s:t])
if gesture in trajectory[trajectory_s:trajectory_t]:
output[s:t] = gesture
return output
def vgg_filter(recognized: np.ndarray, trajectory: np.ndarray,
recognition_median_filter: int = 7,
recognition_tolerance_early: int = 1,
recognition_tolerance_late: int = 8):
recognized_filtered = medfilt(recognized, recognition_median_filter)
gestures = np.unique(trajectory)
for g in gestures: # reject mistakes outside of tolerance range -> -1
if g != 0:
g_mask = trajectory == g
if recognition_tolerance_early > 0:
g_mask = binary_dilation(g_mask, structure=np.array([1, 1, 0]), iterations=recognition_tolerance_early)
if recognition_tolerance_late > 0:
g_mask = binary_dilation(g_mask, structure=np.array([0, 1, 1]), iterations=recognition_tolerance_late)
np.putmask(recognized_filtered, np.logical_and(recognized_filtered == g, np.logical_not(g_mask)), -1)
return recognized_filtered
def filter_recognition(recognized: np.ndarray, trajectory: np.ndarray, gestures, margin_l: int = 1, margin_r: int = 8):
recognized_filtered = recognized.copy()
for g in gestures:
trajectory_mask = trajectory == g
if margin_l > 0:
trajectory_mask = binary_dilation(trajectory_mask, structure=np.array([1, 1, 0]), iterations=margin_l)
if margin_r > 0:
trajectory_mask = binary_dilation(trajectory_mask, structure=np.array([0, 1, 1]), iterations=margin_r)
recognized_filtered[np.logical_and(recognized == g, ~trajectory_mask)] = -2
return recognized_filtered
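# Sketch: predictions of a gesture that fall outside the tolerance window around its
# ground-truth occurrences are marked with -2 (the gesture list below is illustrative), e.g.
#   filtered = filter_recognition(recognized, trajectory, gestures=[1, 2, 3], margin_l=1, margin_r=8)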
def data_per_id(records: List[Record], n_splits: int = None) -> Dict[str, List[Dict[str, List[Record]]]]:
ids = {r.id for r in records}
sets = {}
for i in sorted(ids):
print("id={:}".format(i))
rec_i = record_filter(records, whitelists={"id": [i]})
available_dates = sorted(list({r.date for r in rec_i}))
print("", rec_i)
splits = split(available_dates, n_splits=n_splits, test_size=0.4, random_state=0)
record_splits = []
for train_index, test_index in splits:
train_dates = [available_dates[idx] for idx in train_index]
train_records = record_filter(rec_i, whitelists={"date": train_dates})
test_dates = [available_dates[idx] for idx in test_index]
test_records = record_filter(rec_i, whitelists={"date": test_dates})
record_splits.append({"train": train_records, "test": test_records})
print("train:", train_dates, "test:", test_dates)
sets["{:}".format(i)] = record_splits
return sets
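# Returned structure (sketch): {"<id>": [{"train": [...], "test": [...]}, ...]},
# one dict per split, e.g. sets["3"][0]["train"] (the key "3" is illustrative).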
def data_per_id_and_date(records: List[Record], n_splits: int = None):
ids = {r.id for r in records}
sets = {}
for i in sorted(ids):
rec_i = record_filter(records, whitelists={"id": [i]})
available_dates = sorted(list({r.date for r in rec_i}))
for d in available_dates:
s = "{:}/{:}".format(i, d)
rec_i_d = record_filter(rec_i, whitelists={"date": [d]})
splits = split(rec_i_d, n_splits=n_splits, test_size=0.4, random_state=0)
record_splits = []
for train_index, test_index in splits:
train_records = [rec_i_d[i2] for i2 in train_index]
test_records = [rec_i_d[i2] for i2 in test_index]
record_splits.append({"train": train_records, "test": test_records})
sets[s] = record_splits
return sets
def all_data_per_id(records: List[Record]):
ids = {r.id for r in records}
sets = {}
for i in sorted(ids):
rec_i = record_filter(records, whitelists={"id": [i]})
s = "{:}".format(i)
sets[s] = [{"all": rec_i}]
return sets
def prepare_data(dfs: Dict[Record, pd.DataFrame], s: Dict[str, List[Record]], features: List[str], gestures: List[int]):
metadata = ['TRAJ_1', 'type', 'subject', 'trajectory', 'date_time', 'TRAJ_GT', 'VIDEO_STAMP']
dfs_output: Dict[str, pd.DataFrame] = dict()
column_regex = re.compile("^((" + ")|(".join(features) + "))_[0-9]+")
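    # e.g. features=["EMG", "ACC"] (illustrative names) compiles to "^((EMG)|(ACC))_[0-9]+",
    # matching column names such as "EMG_0" or "ACC_12".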
for k, v in s.items():
df_temp = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import os, sys
from eternabench.stats import calculate_Z_scores
package_list=['vienna_2', 'vienna_2_60C', 'rnastructure', 'rnastructure_60C', 'rnasoft_blstar','contrafold_2','eternafold_B']
external_dataset_types = pd.read_csv(os.environ['ETERNABENCH_PATH']+'/eternabench/external_dataset_metadata.csv')
RNA_CLASSES = list(external_dataset_types.Class.unique())
EB_CM_bootstraps=pd.DataFrame()
for pkg in package_list:
tmp = pd.read_json(os.environ['ETERNABENCH_PATH']+'/data/ChemMapping/bootstraps/CM_pearson_Dataset_%s_BOOTSTRAPS.json.zip' % pkg)
EB_CM_bootstraps = EB_CM_bootstraps.append(tmp, ignore_index=True)
EB_CM_bootstraps = EB_CM_bootstraps.loc[EB_CM_bootstraps.Dataset=='RYOS_I']
EB_CM_bootstraps['Dataset'] = 'Leppek,2021 In-line-seq'
net_dataset_zscore_stats= | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# linkedin_job_scraping.py
import os
import pandas as pd
from parsel import Selector
from time import sleep
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
if config.get('HEADLESS', 'headless') == 'Yes':
# install webdrive when needed runing headless
opts=webdriver.ChromeOptions()
opts.headless=True
driver = webdriver.Chrome(ChromeDriverManager().install() ,options=opts)
else:
# install webdrive when needed runing browser
driver = webdriver.Chrome(ChromeDriverManager().install())
print('\nExecuting Linkedin Login...')
# driver.get method() will navigate to a page given by the URL address
driver.get('https://www.linkedin.com/login')
# locate email form by element_by_id
username = driver.find_element_by_id('username')
# send_keys() to simulate key strokes
username.send_keys(config.get('LINKEDIN_LOGIN', 'email'))
# locate password form by_class_name
password = driver.find_element_by_id('password')
# send_keys() to simulate key strokes
password.send_keys(config.get('LINKEDIN_LOGIN', 'password'))
# locate submit button by_class_name
log_in_button = driver.find_element_by_class_name('btn__primary--large')
# locate submit button by_xpath
log_in_button = driver.find_element_by_xpath('//*[@type="submit"]')
log_in_button.click()
print('\nStarting Posting Search...')
# driver goest to the jobs page
driver.get('https://www.linkedin.com/jobs/')
sleep(2)
# Start search term
search_job = driver.find_element_by_xpath('//*[@type="text"]')
search_job.send_keys(config.get('LINKEDIN_LOGIN', 'search_term'))
sleep(1)
#search.send_keys(Keys.RETURN)
# location
search_location = driver.find_element_by_xpath('//input[starts-with(@id,"jobs-search-box-location")]')
search_location.send_keys(Keys.COMMAND, 'a') #COMMAND is the mac keyboard control
search_location.send_keys(Keys.BACKSPACE)
search_location.send_keys(config.get('LINKEDIN_LOGIN', 'country'))
search_location.send_keys(Keys.RETURN)
sleep(3)
# Gets the URL from the search result
linkedin_result = driver.current_url
# Scroll job list to the end of first page
recentList = driver.find_elements_by_class_name('jobs-search-results__list-item')
for item in recentList:
    driver.execute_script("arguments[0].scrollIntoView();", item)
    sleep(0.1)
# Get full list of positions name
position_name = driver.find_elements_by_class_name('job-card-list__title')
position_name = [url.text for url in position_name]
position_name
len(position_name)
# Get listing Company Name
company_name = driver.find_elements_by_css_selector('.job-card-container__company-name')
company_name = [url.text for url in company_name]
company_name
len(company_name)
# Get listing location
job_location = driver.find_elements_by_xpath('//div[starts-with(@class,"artdeco-entity-lockup__caption")]')
job_location = [url.text for url in job_location]
job_location
len(job_location)
# Get full list of links positions
position_link = driver.find_elements_by_css_selector("div.artdeco-entity-lockup__title > a")
position_link = [link.get_attribute("href") for link in position_link]
position_link
len(position_link)
urls_linkedin = []
for lin in position_link:
terminator = lin.index('?')
urls_linkedin.append(lin[:terminator])
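# Sketch: the loop above strips the tracking query string from each posting URL, e.g.
#   "https://www.linkedin.com/jobs/view/1234?refId=abc" -> "https://www.linkedin.com/jobs/view/1234"
# (the example URL is illustrative).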
if os.path.isfile('opportunities.csv'):
opportunities = | pd.read_csv('opportunities.csv') | pandas.read_csv |
import pandas as pd
# Allows importing data from Google Drive
from google.colab import drive
drive.mount('/content/drive')
# Path to the csv data file
csv = '/content/drive/My Drive/Colab Notebooks/Alura/aluguel.csv'
dados = pd.read_csv(csv, sep = ";")
dados.head(10)
# Method to replace NaN values with 0
dados = dados.fillna(0)
# Using the query method
# Houses with rent below 5000
dados.query("Valor < 5000 & Tipo == 'Casa'")
# Average rent for houses
dados.query("Tipo == 'Casa'").Valor.mean()
# Housing types
Tipos = sorted(dados.Tipo.unique())
# Create a column for the data types
tipos_de_dados = pd.DataFrame(dados.dtypes,
columns = ['Tipos de Dados'])
# Create a column name for the variables in the index
tipos_de_dados.columns.name = 'Variáveis'
# Print the data types
tipos_de_dados
# Importing HTML data
df_html = | pd.read_html('https://news.google.com/covid19/map?hl=pt-BR&mid=%2Fm%2F01l_jz&gl=BR&ceid=BR%3Apt-419', decimal=",") | pandas.read_html |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
| Timedelta('16801 days 01:00:00') | pandas.Timedelta |
"""
Contains classes for retrieving data from file (or wherever it's stored)
"""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import List, Union
import h5py
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
@dataclass
class Retriever(ABC):
"""
Abstract parent class for data retrievers
"""
@abstractmethod
def retrieve_data(self) -> pd.DataFrame:
"""
Retrieves data from file
"""
class SPARetriever(Retriever):
"""
Retriever used with SPA test data
"""
def retrieve_data(
self,
filepath: Union[Path, str],
run_name: Union[str, int],
camera_path: str = "camera_test",
NI_DAQ_path: str = "readout",
scan_param: str = None,
muwave_shutter=True,
scan_param_new_name: str = None,
) -> pd.DataFrame:
"""
        Retrieves SPA test data from file
"""
# Retrieve camera data
df_CAM = self.retrieve_camera_data(filepath, run_name, camera_path)
# Retrieve DAQ data
df_DAQ = self.retrieve_NI_DAQ_data(
filepath, run_name, NI_DAQ_path, scan_param, muwave_shutter
)
# Merge dataframes
df = df_CAM.merge(df_DAQ, left_index=True, right_index=True)
# If needed, give scan parameter a new name
if scan_param_new_name:
df.rename(mapper={scan_param: scan_param_new_name}, inplace=True, axis=1)
# Remove lines where camera returned all zeros
df = df[~df["CameraData"].apply(lambda x: np.allclose(x, np.zeros(x.shape)))]
# Store run name in metadata of dataframe
df.attrs["run_name"] = run_name
# Return merged dataframe
return df
def retrieve_camera_data(
self, filepath: Union[Path, str], run_name: Union[str, int], camera_path: str
) -> pd.DataFrame:
"""
Loads camera data from hdf file.
"""
# Initialize containers for camera images and their timestamps
camera_data = []
camera_time = []
# If run_name given as an index, get the string version
if type(run_name) == int:
run_name = self.get_run_names(filepath)[run_name]
# Determine the path to data within the hdf file
data_path = f"{run_name}/{camera_path}/PIProEM512Excelon"
# Open hdf file
with h5py.File(filepath, "r") as f:
# Loop over camera images (1 image per molecule pulse)
for dataset_name in f[data_path]:
if "events" not in dataset_name:
n = int(dataset_name.split("_")[-1])
camera_data.append(f[data_path][dataset_name][()])
camera_time.append(f[data_path][dataset_name].attrs[f"timestamp"])
# Convert lists into a dataframe and return it
dataframe = pd.DataFrame(
data={"CameraTime": camera_time, "CameraData": camera_data}
)
return dataframe
def retrieve_NI_DAQ_data(
self,
filepath: Union[Path, str],
run_name: Union[str, int],
NI_DAQ_path: str,
scan_param: str = None,
muwave_shutter=True,
) -> pd.DataFrame:
"""
Retrieves data obtained using the NI5171 PXIe DAQ
"""
# Define which channel on DAQ corresponds to which data
yag_ch = 0 # Photodiode observing if YAG fired
abs_pd_ch = 2 # Photodiode observing absorption outside cold cell
abs_pd_norm_ch = (
3 # Photodiode to normalize for laser intensity fluctuations in absorption
)
rc_shutter_ch = 4 # Tells if rotational cooling laser shutter is open or closed
        rc_pd_ch = 5  # Photodiode for checking that rotational cooling is on
muwave_shutter_ch = 6 # Tells if SPA microwaves are on or off
# Initialize containers for data
DAQ_data = []
DAQ_time = []
DAQ_attrs = []
# If run_name given as an index, get the string version
if type(run_name) == int:
run_name = self.get_run_names(filepath)[run_name]
# Determine path to data within the hdf file
data_path = f"{run_name}/{NI_DAQ_path}/PXIe-5171"
# Open hdf file
with h5py.File(filepath, "r") as f:
# Loop over camera images (1 image per molecule pulse)
for dataset_name in f[data_path]:
if "events" not in dataset_name:
n = int(dataset_name.split("_")[-1])
DAQ_data.append(f[data_path][dataset_name][()])
DAQ_time.append(f[data_path][dataset_name].attrs["ch0 : timestamp"])
DAQ_attrs.append(
{
key: value
for key, value in f[data_path][dataset_name].attrs.items()
}
)
# Convert lists to dataframes
data_dict = {
"YAGPD": [dataset[:, yag_ch] for dataset in DAQ_data],
"AbsPD": [dataset[:, abs_pd_ch] for dataset in DAQ_data],
"AbsNormPD": [dataset[:, abs_pd_norm_ch] for dataset in DAQ_data],
"RCShutter": [dataset[:, rc_shutter_ch] for dataset in DAQ_data],
"RCPD": [dataset[:, rc_pd_ch] for dataset in DAQ_data],
"DAQTime": DAQ_time,
}
# If microwave shutter was used, need that
if muwave_shutter:
data_dict["MicrowaveShutter"] = [
dataset[:, muwave_shutter_ch] for dataset in DAQ_data
]
# If scan parameter was specified, get data for that
if scan_param:
data_dict[scan_param] = [dataset[scan_param] for dataset in DAQ_attrs]
# Convert dictionary to dataframe and return it
dataframe = | pd.DataFrame(data=data_dict) | pandas.DataFrame |
import copy
import datetime
from datetime import datetime, timedelta
import math
import re
import numpy as np
import pandas as pd
from PIL import Image
import plotly.express as px
from plotly.subplots import make_subplots
import streamlit as st
from streamlit import markdown as md
from streamlit import caching
import gsheet
LOCAL = False
def is_unique(s):
a = s.to_numpy() # s.values (pandas<0.24)
return (a[0] == a).all()
def st_config():
"""Configure Streamlit view option and read in credential file if needed check if user and password are correct"""
st.set_page_config(layout="wide")
pw = st.sidebar.text_input("Enter password:")
if pw == st.secrets["PASSWORD"]:
return st.secrets["GSHEETS_KEY"]
else:
return None
@st.cache
def read_data(creds,ws,gs):
"""Read court tracking data in and drop duplicate case numb
ers"""
# try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),ws,gs))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df
# except Exception as e:
# st.write(e)
# return None
def date_options(min_date,max_date,key):
quick_date_input = st.selectbox("Date Input",["Custom Date Range","Previous Week","Previous 2 Weeks","Previous Month (4 weeks)"],0,key=key)
if quick_date_input == "Previous Week":
start_date = (
datetime.today() - timedelta(weeks=1)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous 2 Weeks":
start_date = (
datetime.today() - timedelta(weeks=2)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous Month (4 weeks)":
start_date = (
datetime.today() - timedelta(weeks=4)
).date()
end_date = datetime.today().date()
if quick_date_input == "Custom Date Range":
key1 = key + "a"
key2 = key + "b"
cols = st.beta_columns(2)
start_date = cols[0].date_input("Start Date",min_value=min_date,max_value=max_date,value=min_date,key=key1)#,format="MM/DD/YY")
end_date = cols[1].date_input("End Date",min_value=min_date,max_value=max_date,value=datetime.today().date(),key=key2)#,format="MM/DD/YY")
return start_date,end_date
def filter_dates(df,start_date,end_date,col):
df = df.loc[
(df[col].apply(lambda x: x)>=start_date) &
(df[col].apply(lambda x: x)<=end_date)
]
return df
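# Usage sketch: keep tracked cases whose court date lies inside the selected window, e.g.
#   df_window = filter_dates(df, start_date, end_date, "court_date")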
def agg_cases(df,col,i):
df_r = df.groupby([col,"Case Number"]).count().iloc[:,i]
df_r.name = "count"
df_r = pd.DataFrame(df_r)
df_a = pd.DataFrame(df_r.to_records())
df_r = df_r.groupby(level=0).sum()
df_r["cases"] = df_a.groupby(col)["Case Number"].agg(lambda x: ','.join(x))
return df_r
def agg_checklist(df_r):
df_r["result"]=df_r.index
df_b = pd.concat([pd.Series(row['count'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").sum()
df_a = pd.concat([pd.Series(row['cases'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").agg(lambda x: ", ".join(x))
df_r = df_b.merge(df_a,right_index=True,left_index=True)
return df_r
def convert(x):
try:
return x.date()
except:
return None
def convert_date(df,col):
"""Helper function to convert a col to a date"""
    df[col] = pd.to_datetime(df[col]).apply(lambda x: convert(x))
#convert NaTs to None
df[col] = (
df[col]
.astype(object)
.where(df[col].notnull(), None)
)
return df
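# Usage sketch: convert_date(df, "court_date") parses the column with pd.to_datetime,
# keeps only the date part and replaces NaT with None (see render_page below).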
def clean_df(df):
"""clean data and convert types for display"""
df.fillna("",inplace=True)
df = df.astype(str)
return df
def court_tracking_data(df,df_s,df_e):
with st.beta_expander("Court Tracking Data"):
#set up UI date filter element
try:
min_date = df["court_date"].min()-timedelta(days=7)
except: #happens when nulls are in court_date column
min_date = df["court_date"].iloc[0]
max_date = datetime.today().date()+timedelta(days=90)
start_date,end_date = date_options(
min_date,max_date,"1"
)
#Filter data by date
df_f = filter_dates(
df,
start_date,
end_date,
"court_date"
)
df_ef = filter_dates(
df_e,
start_date,
end_date,
"date_filed"
)
#get rid of motion hearings now that we have there stats finished
df_fe = df_f[df_f['motion_hearing']!='Motion Hearing']
#Court tracker volunteer stats
cols = st.beta_columns(2)
if cols[0].checkbox("Volunteer Data (click to expand)"):
cols1 = st.beta_columns(2)
cols1[0].markdown("## Volunteer Tracker Data")
cols1[1].markdown("## ")
cols1[0].markdown(f"### :eyes: Number of trackers:\
{len(df_f['Tracker Name'].unique())}")
cols1[0].write(
df_f
.groupby('Tracker Name')['Case Number']
.nunique()
.sort_values(ascending=False)
)
if cols[1].checkbox("Motion Hearing Data (click to expand)"):
motion_hearings(df_f)
#judge data
if cols[0].checkbox("Judge Tracked Data (click to expand)"):
judge_data(df_fe,df_ef)
#Technical problems
if cols[1].checkbox("Technical Difficulties (click to expand)"):
tech_probs(df_fe)
#pie chart data
if cols[0].checkbox("Pie charts (click to expand)"):
pie_chart_build(df_fe)
#all qualitative data
if cols[1].checkbox("All Qualitative Data (click to expand)"):
render_all_qual_data(df_fe)
def pie_chart_build(df_fe):
cols = st.beta_columns(2)
cols[0].markdown("## Pie Charts for Selected Responses")
cols[1].markdown("## ")
#pie chart columns
pie_chart_cols = [
"Final Case Status",
"RRT Referal",
"Appeals Discussed",
# "NPE Discussion",
"NTV Discussed",
"Tenant Representation",
"Plaintiff Representation",
"Poor Conditions Discussed?",
"Tenant Type",
"Eviction Reason",
"Owner or Property Manager Race",
"Tenant Race",
"Property Type", #also include property subsidy?
"Defendant Language",
"Interpretation Provided",
"Digital Divide Issues",
]
pie_chart_qcols = [
["Other Final Status","Dismissal Reason","Other Dismissal Reason","Abated Reason","Other Abated Reason"],
["RRT Details"],
["Appeals Details"],
# ["NPE Date","NPE Comments"],
["NTV Details","NTV Date","NTV Communicated By","Other NTV Communication"],
["Tenants Name","Tenant Attorney","Other Tenant Representation"],
["Owner or Property Manager Name","Attorney Details","Nationwide Details","Other Plaintiff Representative Details","Plaintiff Details"],
["Poor Condition Details"],
["Other Tenancy Details"],
["Breach of Lease","Other Breach of Lease"],
None,
["Other Tenant Race"],
["Property Name","Property Address","Property Managament","Property Details","Unit Size"],
None,
["Langauage Access Comments"],
["Digital Divide Details"]
]
for col,qcols in zip(pie_chart_cols,pie_chart_qcols):
pie_chart(
df_fe,
col,
cols,#display columns
qcols
)
def motion_hearings(df_f):
cols = st.beta_columns(2)
cols[0].markdown("## Motion Hearing Stats/Qualitative Data")
cols[1].markdown("## ")
df = df_f[df_f['motion_hearing']=='Motion Hearing']
cols[0].markdown(f"### Total number of motion hearings: {df['Case Number'].nunique()}")
qual_cols = ["Plaintiff Details","Defendant Details","RRT Referal","RRT Details","Misc Details"]
render_qual_pie(df,cols,qual_cols)
def judge_data(df_f,df_ef):
display_cols = st.beta_columns(2)
display_cols[0].markdown("## Tacked and Filed Case Counts")
display_cols[1].markdown("## ")
#cases tracked by jp and cases filed
df_f["jp"] = df_f["Case Number"].str[1:2]
df_fjp = pd.DataFrame(df_f
.groupby('jp')['Case Number']
.nunique()
# .sort_values(ascending=False)
)
df_ef_jp = pd.DataFrame(df_ef
.groupby('precinct')['case_number']
.nunique()
# .sort_values(ascending=False)
)
df = pd.DataFrame()
for i in range(1,11):
if i % 2 == 1 :
idx = str(int(math.ceil(i/2)))
df.at[i,"case_type"] = f"JP{idx} Cases Tracked"
df.at[i,"Case Count"] = df_fjp.loc[idx,"Case Number"]
else:
idx = str(int(i/2))
df.at[i,"case_type"] = f"JP{idx} Cases Filed "
df.at[i,"Case Count"] = df_ef_jp.loc[idx,"case_number"]
fig = px.bar(df, x='case_type', y='Case Count')
display_cols[0].markdown("### Cases tracked and Filed by JP")
display_cols[0].plotly_chart(fig,use_container_width=True)
display_cols[0].write(df)
#cases tracked by judge
df_fj = (df_f
.groupby('Judge Name')['Case Number']
.nunique()
.sort_values(ascending=False))
fig = px.bar(df_fj,x=df_fj.index,y='Case Number')
display_cols[1].markdown("### Cases tracked by judge")
display_cols[1].plotly_chart(fig,use_container_width=True)
display_cols[1].write(df_fj)
def tech_probs(df_f):
display_col = st.beta_columns(2)
display_col[0].markdown("## Court Technical Difficulties")
display_col[1].markdown("## ")
#technical problems vs cases we watched by jp (technical problems) filter by date (note improvement)
#only care about cases with tech probs
df_f["jp"] = df_f["Case Number"].str[:2]
df = df_f.loc[
(df_f["Technical Problems?"]!="No technical issues") &
(df_f["Technical Problems?"]!="")
]
df_t = (df
.groupby('jp')['Case Number']
.nunique()
)
fig = px.bar(df_t,x=df_t.index,y='Case Number')
display_col[0].markdown("### Court Tech problems by JP")
display_col[0].plotly_chart(fig,use_container_width=True)
#Percentage of cases with problem table by jp
df_tot = (df_f
.groupby('jp')['Case Number']
.nunique()
)
df_tot = df_t.to_frame().merge(df_tot.to_frame(),right_index=True,left_index=True)
df_tot.columns = ["Cases With Tech Probs","Total Tracked Cases"]
df_tot["Percentage"] = round(df_tot["Cases With Tech Probs"]/df_tot["Total Tracked Cases"],2)*100
display_col[0].write(df_tot)
#technical narrative box with all qualitative data
display = [
"<NAME>",
# "Technical Problems?",
"Other technical problems"
]
df = df_f[display]
df = df.groupby("<NAME>").agg(lambda x: ' / '.join(x))
# df["Technical Problems?"] = df["Technical Problems?"].apply(
# lambda x: re.sub(',+', ' ',x)
# )
df["Other technical problems"] = df["Other technical problems"].apply(
lambda x: re.sub('( / )+', ' / ',x)
)
display_col[1].markdown(f"### Qualitative Data")
for idx,row in df.iterrows():
text = ""
for i,col in enumerate(df.columns):
if row[col] != "":
text += row[col] + ", "
display_col[1].markdown(f"**{idx}** {text}")
def judge_data_filings(df_ef):
display_col = st.beta_columns(2)
display_col[0].markdown("## Filings Data")
display_col[1].markdown("## ")
#cases filed by judge
df_ef['precinct'] = 'JP'+df_ef['precinct']
df_efjp = (df_ef
.groupby('precinct')['case_number']
.nunique()
# .sort_values(ascending=False)
)
fig = px.bar(
df_efjp,
x=df_efjp.index,
y='case_number'
)
display_col[0].markdown("### Cases filed by judge")
display_col[0].plotly_chart(fig,use_container_width=True)
def pie_chart(df,col,display,qualitative_data_cols=None):
display[0].markdown(f"### {col} Total Unanswered: {df[df[col]=='']['Case Number'].nunique()+df[df[col]=='Unknown']['Case Number'].nunique()}/{df['Case Number'].nunique()}")
df = df[df[col]!='']
df_pie = df.groupby(col).count()["Case Number"]
df_pie = pd.DataFrame(df_pie)
fig = px.pie(
df_pie,
values="Case Number",
names=df_pie.index,
)
display[0].plotly_chart(fig)
#render qualitative data if passed
if qualitative_data_cols:
qdata_cols_final = []
for qcol in qualitative_data_cols:
if display[0].checkbox(f"See {qcol}"):
qdata_cols_final.append(qcol)
render_qual_pie(df,display,qdata_cols_final)
else:
display[0].write("No qualitative data to display")
def render_qual_pie(df,display,qual_cols):
df.reset_index(inplace=True)
    #include defendant and case number
qual_cols.append('Case Details')
qual_cols.append('Case Number')
df = df[qual_cols]
df.replace("Unknown","",inplace=True)
for col in df.columns:
if not((col == "Case Details") or (col == "Case Number")):
display[1].markdown(f"### {col}")
for i,entry in enumerate(df[col]):
if entry != "":
display[1].markdown(f"**{df.at[i,'Case Details']}/{df.at[i,'Case Number']}:** {entry}")
def render_all_qual_data(df):
display = st.beta_columns(2)
display[0].markdown("## All Qualitative Data")
display[1].markdown("## ")
cols = [
"Late Reason",
"Other technical problems",
"Other Final Status",
"Dismissal Reason",
"Other Dismissal Reason",
"Abated Reason",
"Other Abated Reason",
"Postponed Date",
"Fee Details",
"Attorney Details",
"Nationwide Details",
"Other Plaintiff Representative Details",
"Plaintiff Details",
"Defendant Details",
"Langauage Access Comments",
"Disability Accomodations Details",
"Digital Divide Details",
"Property Name",
"Property Address",
"Property Managament",
"Property Details",
"COVID Details",
"Poor Condition Details",
"Details About Documents and Evidence Shared with Tenant",
"Other Tenancy Details",
"Late Fees/ Other Arrears",
"Tenant Dispute Amount",
"NTV Details",
"Other NTV Communication",
"CDC Details",
"NPE Comments",
"Appeals Details",
"RRT Details",
"Misc Details",
"Other Breach of Lease",
"Plaintiff Attorney",
"Nationwide Name",
"Other Plaintiff Representation",
"Tenant Attorney",
"Other Tenant Representation",
]
df.reset_index(inplace=True)
    #include defendant and case number
cols.append('Case Details')
cols.append('Case Number')
df = df[cols]
df.replace("Unknown","",inplace=True)
for col in cols:
if not((col == "Case Details") or (col == "Case Number")):
if display[0].checkbox(f"Qualitative data for {col} (click to expand)"):
display[1].markdown(f"### {col}")
for i,entry in enumerate(df[col]):
if entry != "":
display[1].markdown(f"**{df.at[i,'Case Details']}/{df.at[i,'Case Number']}:** {entry}")
def setting_data(df_s):
#(settings now to ~90 days out)
container = st.beta_container()
cols_container = container.beta_columns(2)
cols = st.beta_columns(2)
days = cols[0].slider(
"Days out?",
0,
90,
90
)
df_sf = filter_dates(
df_s,
datetime.today().date(),
(datetime.today()+timedelta(days=days)).date(),
"setting_date"
)
cols_container[0].markdown(f"### :calendar: Number of Settings \
today-{days} days out: {len(df_sf)}")
df_sf.index = df_sf["case_number"]
cols[0].write(
df_sf[["setting_date","setting_time"]]
)
def judgement_data(dfj):
display = st.beta_columns(2)
display[0].markdown("## Case Outcomes")
display[1].markdown("## ")
    #possession and monetary judgement by jp
#convert to numeric for amounts
dfj["amount_awarded"] = pd.to_numeric(dfj["amount_awarded"])
dfj["poss_awarded"] = dfj["comments"].str.contains("POSS")
#we want to plot data for each precinct on how much was awarded to plaintiffs and how many possesions
#build df for graph
df_graph = pd.DataFrame()
for i in range(1,6):
        #possession break downs
        df_graph.at[i,"Possession Awarded"] = len(dfj.loc[dfj["poss_awarded"]].loc[dfj["precinct"]==str(i)]) #this is not accurate
#amount breakdowns
df_graph.at[i,"Amount Awarded"] = float(dfj.loc[(dfj["precinct"]==str(i)) & (dfj["judgement_for"]=="PLAINTIFF")]["amount_awarded"].sum())
#judgement breakdowns
df_graph.at[i,"Judgment For Plaintiff"] = len(dfj.loc[(dfj["judgement_for"] == "PLAINTIFF") & (dfj["precinct"]==str(i))])
df_graph.at[i,"Judgment For Defendant"] = len(dfj.loc[(dfj["judgement_for"] == "DEFENDANT") & (dfj["precinct"]==str(i))])
df_graph.at[i,"No Judgment"] = len(dfj.loc[(dfj["judgement_for"] == "NO JUDGEMENT") & (dfj["precinct"]==str(i))])
#total number of cases
df_graph.at[i,"Total Number of cases"] = len(dfj.loc[dfj["precinct"]==str(i)])
#bar chart for amount
df_bar = df_graph[["Amount Awarded"]]
fig = px.bar (
df_bar,
x = df_bar.index,
y = "Amount Awarded",
labels={
"index": "Justice of the Peace"
},
orientation = "v",
title = "Amounts Awarded by Precinct"
)
display[0].plotly_chart(fig)
#make pie charts FIGURE OUT HOW TO STOP SORTING THESE
df_pie = df_graph[["Judgment For Plaintiff","Judgment For Defendant","No Judgment"]].T
for i in range(1,6):
df_pc = df_pie[i]
fig = px.pie(
df_pc,
values = df_pc.values,
names = df_pc.index,
            color = df_pc.index,
            color_discrete_map={"Judgment For Plaintiff":"red","Judgment For Defendant":"green","No Judgment":"blue"},
title = f"Precinct {i} Case Outcomes"
)
display[(i)%2].plotly_chart(fig)
display[0].markdown("### Judgment Data")
df_graph["Amount Awarded"] = df_graph["Amount Awarded"].apply(lambda x: '${:,.2f}'.format(float(x)))
display[0].write(df_graph)
def representation_data(df):
display = st.beta_columns(2)
display[0].markdown("## Representation Information")
display[1].markdown("## ")
df_graph = pd.DataFrame()
for i in range(1,6):
#Representation Break downs
df_graph.at[i,"Plaintiffs Attorneys"] = len(df.loc[(df["attorneys_for_plaintiffs"]!= "PRO SE") & (df["attorneys_for_plaintiffs"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants Attorneys"] = len(df.loc[(df["attorneys_for_defendants"]!= "PRO SE") & (df["attorneys_for_defendants"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Plaintiffs Pro Se"] = len(df.loc[(df["attorneys_for_plaintiffs"]== "PRO SE") & (df["attorneys_for_plaintiffs"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants Pro Se"] = len(df.loc[(df["attorneys_for_defendants"]== "PRO SE") & (df["attorneys_for_defendants"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Plaintiffs No Rep Data"] = len(df.loc[(df["attorneys_for_defendants"]=="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants No Rep Data"] = len(df.loc[(df["attorneys_for_defendants"]=="") & (df["precinct"]==str(i))])
#total number of cases
df_graph.at[i,"Total Number of Cases"] = len(df.loc[df["precinct"]==str(i)])
# display[0].markdown("### Defendant Representation Counts")
display[0].markdown("### Representation Counts")
df_graph = df_graph.astype(int)
display[0].write(df_graph)
# display[0].markdown("### Defendant Representation Counts")
# display[0].write(df_graph[["Defendants Attorneys","Defendants Pro Se","Defendants No Rep","Total Number of Cases"]])
# display[1].markdown("### Plaintiff Representation Counts")
# display[1].write(df_graph[["Plaintiffs Attorneys","Plaintiffs Pro Se","Plaintiffs No Rep","Total Number of Cases"]])
#display[0].markdown("### Representation Bar Graph")
# fig = px.bar(df_graph,x=df_graph.index,y=["Defendants Attorneys","Plaintiffs Attorneys","Defendants Pro Se","Plaintiffs Pro Se"])
#display[0].plotly_chart(fig)
#top plaintiff attorneys
df_a = df[(df["attorneys_for_plaintiffs"]!="PRO SE") & (df["attorneys_for_plaintiffs"]!="")]
df_af = df_a.groupby("attorneys_for_plaintiffs").count()["case_number"].sort_values(ascending=False)
display[0].markdown("### Top Plaintiff Attorneys")
display[0].write(df_af)
def plaintiff_data(df_ef):
    #determine top plaintiffs
display = st.beta_columns(2)
display[0].markdown("## Top Plaintiffs")
display[1].markdown("## ")
df = df_ef.groupby("plaintiff").count()["case_number"]
df= df.sort_values(ascending=False)
display[0].write(df)
pass
def property_data(df_ef):
display = st.beta_columns(2)
display[0].markdown("## Property Data")
display[1].markdown("## ")
#determine top properties
df_prop = df_ef[["parcel_id","code_complaints_count","code_violations_count","current_property_owner","dba","2016_unit_count","lon","lat"]]
#get rid of unmatched entries
df_prop = df_prop[df_prop["parcel_id"]!=""]
#determine counts
df1 = df_prop.groupby("parcel_id").count()["dba"]
df1.columns = "Eviction Count"
#get rid of duplicate ids since we already counted them
df_props = df_prop.drop_duplicates("parcel_id")
#merge counts back in and create final data frame
df_props = df_props.merge(df1,left_on="parcel_id",right_index=True)
#drop uneeded columns and rename
df_pf = df_props[["dba_x","dba_y","parcel_id"]]
df_pf.columns = ["DBA","Eviction Count","Parcel ID"]
df_pf.sort_values("Eviction Count",ascending=False,inplace=True)
#sort and take top 25
display[0].markdown("## Top Properties by Eviction")
display[0].write(df_pf)
#map properties?
df_props["lon"] = pd.to_numeric(df_props["lon"])
df_props["lat"] = pd.to_numeric(df_props["lat"])
    #clean up: +/-2 degrees is probably too much
df_props = df_props[(df_props["lat"]>28) & (df_props["lat"]<32)]
df_props = df_props[(df_props["lon"]>-99) & (df_props["lon"]<-95)]
display[1].markdown("### Map of Evictions in Austin")
display[1].map(df_props,9)
def subsidy_data(df_ef):
cols = st.beta_columns(2)
cols[0].markdown("## Property Subsidy Information")
cols[1].markdown("## ")
#HACA is Has Sec 8 Voucher
df = df_ef.loc[(df_ef["HACA"]=="TRUE") | (df_ef["CARES"]=="TRUE") | (df_ef["nhpd_property_id"]!="") ]
df = pd.DataFrame(df
.groupby('parcel_id')['case_number']
.nunique()
)
df_props = df_ef[["dba","parcel_id"]]
df = df.merge(df_props,left_index=True,right_on="parcel_id")
df.drop_duplicates("parcel_id",inplace=True)
df.sort_values("case_number",ascending=False,inplace=True)
# df.columns = ["Cases with Subsidies"]
cols[0].markdown("### Subsidized Properties by Eviction Counts")
cols[0].write(df)
fig = px.bar (
df.iloc[0:10,:],
x = "dba",
y = "case_number",
orientation = "v",
)
cols[1].markdown("### Top 10 Subsidized Properties by Eviction Counts")
cols[1].plotly_chart(fig)
#pie chart subsidy vs not
df = df_ef.loc[(df_ef["HACA"]=="TRUE") | (df_ef["CARES"]=="TRUE") | (df_ef["nhpd_property_id"]!="") ]
df_not = df_ef.loc[(df_ef["HACA"]!="TRUE") & (df_ef["CARES"]!="TRUE") & (df_ef["nhpd_property_id"]=="") ]
df_pie = pd.DataFrame()
df_pie.at["Subsidized","Count"] = len(df)
df_pie.at["Non-Subsidized","Count"] = len(df_not)
fig = px.pie(
df_pie,
values="Count",
names=df_pie.index,
)
cols[0].markdown("### Subsidized Properties Evictions vs. Non-Subsidized Properties")
cols[0].plotly_chart(fig)
def eviction_data(df_e,df_s):
with st.beta_expander("Eviction Data"):
#Settings data
setting_data(df_s)
try:
min_date = (
df_e["date_filed"].min()-timedelta(days=7)
)
except: #happens when nulls are in court_date column
min_date = df_e["date_filed"].iloc[0]
max_date = datetime.today().date()+timedelta(days=90)
start_date,end_date = date_options(
min_date,max_date,"2"
)
#Filter data by date
df_ef = filter_dates(
df_e,
start_date,
end_date,
"date_filed"
)
cols = st.beta_columns(2)
if cols[0].checkbox("Judge Filing Data (click to expand)"):
judge_data_filings(df_ef)
if cols[1].checkbox("Judgement Data (click to expand)"):
judgement_data(df_ef)
if cols[0].checkbox("Plaintiff Data (click to expand)"):
plaintiff_data(df_ef)
if cols[1].checkbox("Representation (Attorney) Data (click to expand)"):
representation_data(df_ef)
if cols[0].checkbox("Property Data (click to expand)"):
property_data(df_ef)
if cols[1].checkbox("Subsidy Data (click to expand)"):
subsidy_data(df_ef)
def render_page(df,df_e,df_s,df_c):
"""Render all page elements except the api key login"""
#Clean data and convert types
df = clean_df(df)
df_s = clean_df(df_s)
df_c = clean_df(df_c)
df_e = clean_df(df_e)
df = convert_date(df,"court_date")
df_s = convert_date(df_s,"setting_date")
df_e = convert_date(df_e,"hearing_date")
df_e = convert_date(df_e,"date_filed") #file date to date
court_tracking_data(df,df_s,df_e)
eviction_data(df_e,df_s)
# st.write(df)
if __name__ == "__main__":
if LOCAL:
df = pd.read_csv("../data/01_Community_lawyer_test_out_final - Backend.csv")
df_e = pd.read_csv("../data/Court_scraper_evictions_archive - evictions_archive.csv")
df_s = pd.read_csv("../data/Court_scraper_eviction_scheduler - eviction_scheduler.csv")
df_c = | pd.read_csv("../data/Court_contact_data_PIR.csv") | pandas.read_csv |
# t-SNE
from sklearn.manifold import TSNE
# PCA
from sklearn.decomposition import PCA
from plotnine import * # from ggplot import *
# Others
import unittest
from cnnseq.CNNSeq2Seq2 import load_cnnseq2seq, get_h0
from cnnseq.CNNSeq2Seq2_main import feats_tensor_input, feats_tensor_audio
import numpy as np
from cnnseq.utils import project_dir_name, ensure_dir
import os
import math
import pandas as pd
__author__ = "<NAME>"
"""
Class to plot visual features, target and obtained audio along with their emotional label
TODO:
HSL input
Visual features (hidden state h0 for layers 1 and 2 of model), shape: (2, 1, 128)
Target audio
MAYBE: Generated audio
"""
CLASS = {0: 'Negative', 1: 'Positive'}
class DataAnalysis:
def __init__(self, data_filename, cnn_pth, cnn_seq2seq_pth, results_dir=''):
self.results_dir = results_dir
ensure_dir(self.results_dir)
self.data = self.load_data(data_filename)
self.cnnseq2seq_model, self.cnnseq2seq_params = self.load_model(cnn_pth, cnn_seq2seq_pth)
def load_data(self, data_filename):
"""
Data contains 4 fields: audio, HSL_data, emotion and text
:param data_filename: filename with data
:return: data
"""
data = np.load(data_filename)
return data
def load_model(self, cnn_pth, cnn_seq2seq_pth):
"""
Load model from CNN and CNN-Seq2Seq pre-trained weights
:param cnn_pth: filename to CNN pre-trained model
:param cnn_seq2seq_pth: filename to CNN-Seq2Seq pre-trained model
:return: CNN-Seq2Seq model
"""
# print("Load CNN-Seq2Seq model for hidden visual features")
cnnseq2seq_model, cnnseq2seq_params = load_cnnseq2seq(cnn_pth, cnn_seq2seq_pth)
return cnnseq2seq_model, cnnseq2seq_params
def hsl_input(self):
hsl_data = self.data['HSL_data']
hsl_data = np.reshape(hsl_data, [np.shape(hsl_data)[0], -1])
return hsl_data
def visual_feats(self):
print("Visual features")
HSL_data = self.data['HSL_data']
visual_input_tensors = feats_tensor_input(HSL_data, data_type='HSL')
audio = self.data['audio']
# Reshape 48,000 -> 8*6,000
audio_n_prediction = self.cnnseq2seq_params['audio_n_prediction']
y_audio_dim = int(math.ceil(np.shape(audio)[1] / audio_n_prediction))
audio = np.reshape(audio, [-1, audio_n_prediction, y_audio_dim])
audio_input_tensors = feats_tensor_audio(audio)
hidden_info = get_h0(self.cnnseq2seq_model, visual_input_tensors, audio_input_tensors, self.cnnseq2seq_params)
h_dim1, h_dim2 = [], []
for h in hidden_info:
h_0 = h[0].squeeze().cpu().detach().numpy() # view(-1, np.shape(hidden_info[0]))
h_1 = h[1].squeeze().cpu().detach().numpy()
h_dim1.append(h_0)
h_dim2.append(h_1)
return h_dim1, h_dim2
def save_data_in_csv(self, X, y, filename='test.csv'):
df = pd.DataFrame(X)
df.to_csv(filename, index=False)
csv_input = | pd.read_csv(filename) | pandas.read_csv |
import pathlib
import json
import math
import numpy as np
import pandas as pd
from typing import Union
from typing import Tuple
import seaborn as sns
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
path=pathlib.Path(data_fname)
if path.exists():
self.data_fname=path
else:
raise ValueError('invalid path')
def read_data(self):
"""Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data= pd.read_json(path_or_buf=self.data_fname,orient='columns')
def show_age_distrib(self) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates and plots the age distribution of the participants.
Returns
-------
hist : np.ndarray
Number of people in a given bin
bins : np.ndarray
Bin edges
"""
ages=self.data.age
bins=np.arange(0,110,10)
hist,bins=np.histogram(ages.values,bins=bins)
_=sns.histplot(data=self.data, x="age",bins=bins)
return hist,bins
def remove_rows_without_mail(self) -> pd.DataFrame:
"""Checks self.data for rows with invalid emails, and removes them.
Returns
-------
df : pd.DataFrame
A corrected DataFrame, i.e. the same table but with the erroneous rows removed and
the (ordinal) index after a reset.
"""
data= | pd.DataFrame(self.data) | pandas.DataFrame |
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
# look at the first five columns
print(dataset.head())
# Check if there's any column which is not useful and remove it like the column id
dataset = dataset.drop(["Id"],1)
# check the statistical description
print(dataset.info())
# --------------
# We will visualize all the attributes using Violin Plot - a combination of box and density plots
import seaborn as sns
from matplotlib import pyplot as plt
#names of all the attributes
cols = dataset.columns
#number of attributes (exclude target)
#x-axis has target attribute to distinguish between classes
x = dataset["Cover_Type"]
#y-axis shows values of an attribute
y = dataset.drop(["Cover_Type"],1)
size = y.columns
#Plot violin for all attributes
for i in size:
sns.violinplot(x=x,y=y[i])
# --------------
import numpy
upper_threshold = 0.5
lower_threshold = -0.5
# Code Starts Here
subset_train = dataset.iloc[:,0:10]
data_corr = subset_train.corr()
sns.heatmap(data_corr,annot=True)
correlation = list(data_corr.unstack().sort_values(kind="quicksort"))
corr_var_list = []
for i in correlation:
    if (i > upper_threshold or i < lower_threshold) and i != 1:
corr_var_list.append(i)
print(corr_var_list)
# Code ends here
# --------------
#Import libraries
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Identify the unnecessary columns and remove it
dataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)
X = dataset.drop(["Cover_Type"],1)
Y = dataset["Cover_Type"]
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.2,random_state=0)
# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.
#Standardized
scaler = StandardScaler()
#Apply transform only for continuous data
X_train_temp = scaler.fit_transform(X_train.iloc[:,0:10])
X_test_temp = scaler.transform(X_test.iloc[:,0:10])
#Concatenate scaled continuous data and categorical
X_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,10:]),axis=1)
X_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,10:]),axis=1)
scaled_features_train_df = pd.DataFrame(X_train1)
scaled_features_train_df.columns = X_train.columns
scaled_features_train_df.index = X_train.index
scaled_features_test_df = | pd.DataFrame(X_test1) | pandas.DataFrame |
########################################################################
# Author(s): <NAME>, <NAME>
# Date: 21 September 2021
# Desc: Create PyTorch DataLoader for simulated measurements
########################################################################
import sys, os, csv
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import random
from numpy.random import default_rng
from gnss_lib.sim_gnss import expected_measures
from gnss_lib.utils import datetime_to_tow
from gnss_lib import coordinates as coord
def load_datasets(config, transforms=None):
# Different kinds of simulated datasets each of which has its own folder
    # Dataset loader to handle different folders. For a hierarchy where we have different files with different entries (different measurement and ephemeris files I think)
root = config['root']
dirs = [os.path.join(root, name) for name in os.listdir(root)]
random.shuffle(dirs)
for new_root in dirs:
_conf = config.copy()
_conf['root'] = new_root
yield Sim_GNSS_Dataset(_conf)
def list_datasets(config, transforms=None):
# Same as the previous but with files
root = config['root']
dirs = [os.path.join(root, name) for name in os.listdir(root)]
ret = []
for new_root in dirs:
_conf = config.copy()
_conf['root'] = new_root
ret.append(Sim_GNSS_Dataset(_conf))
return ret
class Sim_GNSS_Dataset_Snap(Dataset):
def __init__(self, config, transforms=None):
self.root = config['root']
data_dir = config['measurement_dir']
# init_dir = config['initialization_dir']
# info_path = config['info_path']
self.max_open_files = config['max_open_files'] #cache size
self.guess_range = config['guess_range']
self.transform = transforms
# Save number of entries in each file
# self.info = pd.read_csv(os.path.join(self.root, info_path))
# self.timestep_counts = {row['id'] : row['len'] for row in self.info.iterrows()}
self.timestep_counts = {}
self.use_biases = bool(config['use_biases'])
# Save file paths
file_paths = {}
seed_values = {}
for file_path in os.listdir(os.path.join(self.root, data_dir)):
tmp_name = os.path.split(file_path)[1].split(".")[0]
traj_id, seed_id = tmp_name.split("_")
traj_id = int(traj_id)
if traj_id not in file_paths.keys():
file_paths[traj_id] = []
seed_values[traj_id] = []
file_paths[traj_id].append(os.path.join(self.root, data_dir, file_path)) # Done this way to add paths from multiple directories later
seed_values[traj_id].append(int(seed_id))
data = pd.read_csv(os.path.join(self.root, data_dir, file_path))
self.timestep_counts[traj_id] = len(data['t_idx'].unique())
self.meas_file_paths = file_paths
self.seed_values = seed_values
# file_paths = {key : [] for key in self.meas_file_paths.keys()}
# for file_path in os.listdir(os.path.join(self.root, init_dir)):
# tmp_idx = os.path.split(file_path).split(".")[0]
# traj_id, seed_id = tmp_idx.split("_")
# traj_id = int(traj_id)
# file_paths[traj_id].append(file_path) # Done this way to add paths from multiple directories later
# self.init_file_paths = file_paths
# Save number of seeds for each trajectory
self.seed_counts = {key : len(value) for (key, value) in self.meas_file_paths.items()}
self.full_counts = {key: self.seed_counts[key]*self.timestep_counts[key] for key in self.seed_counts.keys()}
self.N_total = sum(self.full_counts.values())
# Precompute indices (mapping from index to where that information is stored. index 899 -> file identifiers)
indices = []
keyList=sorted(self.full_counts.keys())
traj_idx = 0
seed_idx = 0
timestep = 0
for i in range(self.N_total):
key = keyList[traj_idx]
seed = self.seed_values[key][seed_idx]
indices.append((key, seed, timestep))
timestep += 1
if timestep>=self.timestep_counts[key]:
timestep = 0
seed_idx += 1
if seed_idx >= self.seed_counts[key]:
seed_idx = 0
traj_idx += 1
self.indices = indices
# Initialize biases
if self.use_biases:
self.biases = {}
def get_files(self, key, seed):
# Cache based manager of data files
if not hasattr(self, 'cache_traj'):
self.cache_traj = dict()
self.cache_times = dict()
# Load Trajectory file
seed_hash = str(key)+"_"+str(seed)
if seed_hash in self.cache_traj.keys():
seed_file = self.cache_traj[seed_hash]
times = self.cache_times[seed_hash]
else:
seed_file = pd.read_csv(self.meas_file_paths[key][self.seed_values[key].index(seed)])
times = seed_file['t_idx'].unique()
if len(self.cache_traj) >= self.max_open_files:
pop_key = list(self.cache_traj.keys())[0]
self.cache_traj.pop(pop_key)
self.cache_times.pop(pop_key)
self.cache_traj[seed_hash] = seed_file
self.cache_times[seed_hash] = times
# # Repeat for Seed file
# seed_hash = str(key)+"_"+str(seed_idx)
# if seed_hash in self.cache_seed.keys():
# seed_file = self.cache_seed[seed_hash]
# else:
# seed_file = pd.read_csv(self.init_file_paths[key][seed_idx])
# if len(self.cache_traj) + len(self.cache_seed) >= self.max_open_files:
# self.cache_seed.pop(list(self.cache_seed.keys())[0])
# self.cache_seed[seed_hash] = seed_file
return seed_file, times
def add_guess_noise(self, true_XYZb):
rng = default_rng()
guess_noise = np.array([rng.uniform(-self.guess_range[0], self.guess_range[0]),
rng.uniform(-self.guess_range[1], self.guess_range[1]),
rng.uniform(-self.guess_range[2], self.guess_range[2]), # x, y, z
rng.uniform(0, self.guess_range[3]), # cdt
rng.uniform(-self.guess_range[4], self.guess_range[4]),
rng.uniform(-self.guess_range[5], self.guess_range[5]),
rng.uniform(-self.guess_range[6], self.guess_range[6]), # vx, vy, vz
rng.uniform(-self.guess_range[7], self.guess_range[7]) # cdt_dot
])
return true_XYZb + guess_noise
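    # Sketch: perturbs a ground-truth state [x, y, z, cdt, vx, vy, vz, cdt_dot] with
    # uniform noise bounded per component by config['guess_range'], e.g.
    #   noisy_state = self.add_guess_noise(true_XYZb)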
def __getitem__(self, idx):
key, seed_idx, timestep = self.indices[idx]
seed_file, times = self.get_files(key, seed_idx)
data = seed_file[seed_file['t_idx']==times[timestep]]
gpsweek, tow = datetime_to_tow( | pd.to_datetime(times[timestep]) | pandas.to_datetime |
import re
import os
import copy
import pandas as pd
from pandas.core.common import flatten as pd_flatten
# Pattern to match numbers in strings with regular expressions.
# From: <NAME>; https://stackoverflow.com/questions/4703390/how-to-extract-a-floating-number-from-a-string
numeric_const_pattern = '[-+]? (?: (?: \d* \. \d+ ) | (?: \d+ \.? ) )(?: [Ee] [+-]? \d+ ) ?'
rx = re.compile(numeric_const_pattern, re.VERBOSE)
# rx.findall("Some example: Jr. it. was .23 between 2.3 and 42.31 seconds")
def get_exp_readme_files(institutes, base_path, experiment_key="TGA"):
"""
This function needs a list of institute labels, e.g. abbreviations of the
institute names, that doubles as the directory names containing the
contributed data by said institute. The base path to where these
directories are located needs to be provided as well. An experiment
keyword needs to be specified that is used in the title of the
section of the README file that describes the desired experiment,
e.g. TGA.
This function iterates over all the institutes and checks if the README
file contains the experiment keyword. If that is the case, the whole
README content is extracted as a list of string and stored in a
dictionary, where the institute labels are the keys to access the
respective lists.
:param institutes: list of institute labels that doubles as directory names
:param base_path: path to the institute directories
:param experiment_key: string, experiment keyword to look for in the
README files
:return: dictionary populated with the content of the README markdown
bullet points of a desired experiment (experiment_key)
"""
print("* Institutes that contributed {} data:".format(experiment_key))
# Initialise collection of README files with desired experiments.
exp_readme_contents = dict()
# Iterate over all contributions by institute.
for institute in institutes:
# Build path to each individual README file.
readme_path = os.path.join(base_path,
institute,
"README.md")
print(" " + institute)
# Open the README file and check if it contains
# the experiment keyword in a markdown title line.
with open(readme_path, encoding='utf8') as f:
for line in f:
if experiment_key in line and "###" in line:
# Read the complete file content.
with open(readme_path, encoding='utf8') as f:
# Create list of string, line by line.
readme_lines = [line.rstrip() for line in f]
# Collect README file content.
exp_readme_contents[institute] = readme_lines
continue # Skip to next file.
return exp_readme_contents
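# Usage sketch (institute label and path are illustrative):
#   readmes = get_exp_readme_files(["Institute_A"], "./contributions", experiment_key="TGA")
#   tga_lines = read_experiment_lines(readmes["Institute_A"])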
def read_experiment_lines(readme_lines, start_marker_a="TGA",
start_marker_b="###", end_marker="###"):
"""
This function iterates over a list of strings and searches for information
about a desired experiment. The information is found by looking for
sub-strings (markers) that encapsulate the desired information. A
shortened list is returned containing the experiment information.
:param readme_lines: List of string containing text file content
:param start_marker_a: Marker to find the desired experiment
:param start_marker_b: Additional marker to make sure the correct
line is chosen
:param end_marker: Marker to indicate when to stop collecting lines.
:return: list, containing lines of string related to a desired experiment
"""
# Initialise collection of experiment description.
experiment_lines = list()
# Flag to control what lines to collect.
collect_entry = False
# Iterate over all the lines of the file content.
for line in readme_lines:
# Skip empty lines.
if line == '':
continue
if end_marker in line:
if "####" not in line:
# Stop collecting lines after the TGA experiment
# description concluded and a new section starts.
collect_entry = False
if start_marker_a in line and start_marker_b in line:
# Allow collection of lines.
collect_entry = True
if collect_entry is True:
# Collect lines.
experiment_lines.append(line)
return experiment_lines
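# Example usage sketch (hypothetical, assuming readme_lines was read beforehand):
# tga_lines = read_experiment_lines(readme_lines, start_marker_a="TGA",
#                                   start_marker_b="###", end_marker="###")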
def read_test_condition_table(experiment_lines):
"""
Takes a list of strings of information on an experiment that also
includes a markdown table with a summary of the experiment. It finds
the table lines and translates them into a Pandas DataFrame.
:param experiment_lines: list of strings containing information on an
experiment including a markdown table
:return: Pandas DataFrame of said table
"""
# Initialise data collection.
pre_process_content = list()
table_content = dict()
# Find and read table.
for line in experiment_lines:
if "|:-" in line:
# Skip lines containing visual markers (horizontal lines).
continue
elif "|" in line:
# Read table lines and separate by columns.
# Ignore first and last character per line, they are empty.
pre_process_content.append(line.split("|")[1:-1])
# print(line.split("|")[1:-1])
# Process content by column.
for col_id, col_label in enumerate(pre_process_content[0]):
# Initialise column.
col_content = list()
# Get all cells per column.
for line_id, line in enumerate(pre_process_content[1:]):
if "Test Label" not in col_label and "File Name" not in col_label:
# Transform string to float.
cell_content = line[col_id].replace("\\", "")
col_content.append(float(cell_content))
else:
# Remove "\\" from file names and experiment labels.
cell_content = line[col_id].replace("\\", "")
# Remove surrounding " " of test label.
cell_content = cell_content.replace(" ", "")
col_content.append(cell_content)
# Collect columns.
table_content[col_label[1:-1]] = col_content
# Return table as Pandas DataFrame.
return pd.DataFrame.from_dict(table_content)
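# Illustrative example (not actual repository content): a markdown table like
# | Test Label | Heating Rate (K/min) |
# |:----------:|:--------------------:|
# | R1 | 10.0 |
# comes back as a DataFrame with a string column "Test Label"
# and a float column "Heating Rate (K/min)".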
def get_institute(readme_lines):
"""
Takes a list of strings of the README-file content and extracts the first
line, which contains the institute label and name. Label and name are both
returned as a list of string.
:param readme_lines: list of strings of the README-file content
:return: list of string with institute label and name
"""
# Read the institute line (skip markdown marker for heading).
institute_line = readme_lines[0][2:]
# Split the institute line into individual elements.
institute_line_parts = institute_line.split(" ")
# Get the institute label and its length (amount of characters).
institute_label_raw = institute_line_parts[-1]
label_raw_len = len(institute_label_raw)
institute_label = institute_label_raw[1:-1]
# From the institute line remove the institute label
# to get the institute name.
institute_name = institute_line[:-(label_raw_len + 1)]
# Return institute label and name as a list.
return [institute_label, institute_name]
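# Illustrative example (hypothetical heading line):
# get_institute(["# Some University (SU)"]) returns ['SU', 'Some University'].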
def build_tga_dict(experiment_lines, institute_name_info,
exp_table_df, tga_base_dict, material_path):
"""
This function creates a deep copy from a base dictionary of a given
experiment and populates it with the respective values from markdown
bullet points of the README file content.
:param experiment_lines: list of string of the TGA README
:param institute_name_info: list containing the institute label and the
institute name
:param exp_table_df: Pandas DataFrame of the test condition summary table
:param tga_base_dict: dictionary containing the keys for the TGA
experiment, will be (deep) copied and populated
:param material_path: path to the material information in the MaCFP repo,
e.g. "Non-charring\PMMA"
:return: populated (deep) copy of the tga_base_dict
"""
#
experiment_type = "TGA"
experiment_info = {experiment_type: dict()}
institute_label = institute_name_info[0]
institute_name = institute_name_info[1]
repetition_info = {institute_label: dict()}
for test_label in exp_table_df["Test Label"][:]:
# Remove unnecessary spaces.
test_label = test_label.replace(" ", "")
# Get line number of test.
test_idx = exp_table_df[exp_table_df['Test Label'] == test_label].index[0]
# Initialise experiment dictionary and fill in a copy of
# the experiment description template.
test_info = copy.deepcopy(tga_base_dict)
# Set institute name and label.
test_info['laboratory']['label'] = institute_label
test_info['laboratory']['name'] = institute_name
# Get file name
data_file_name = exp_table_df['File Name'][test_idx] + ".csv"
# Build data file path.
data_file_path = os.path.join(material_path.split("\\")[-2],
material_path.split("\\")[-1],
institute_label,
data_file_name)
# Store relative data file path.
test_info['path'] = data_file_path
# Set experiment description items from README.
get_tga_items(md_lines=experiment_lines,
items=test_info)
# Set heating rate.
new_val = exp_table_df['Heating Rate (K/min)'][test_idx]
new_unit = "K/min"
test_info["heating_rate"] = {'value': new_val,
'unit': new_unit}
# Set initial sample mass.
new_val = exp_table_df['Initial Sample Mass (mg)'][test_idx]
new_unit = "mg"
test_info["sample_mass"] = {'value': new_val,
'unit': new_unit}
repetition_info[institute_label][test_label] = test_info
print(data_file_path)
# experiment_info[experiment_type] = repetition_info
return repetition_info # test_info
def utility_build_base_dict(md_lines):
"""
Utility function to build a dictionary from points found in the README
lines. This is intended to process the descriptions of the same
experiment provided by the various contributors to easily find the
different items used in the description. This helps to unify the README
for a specific experiment across all contributors. It is not meant to be
used on a regular basis, but rather during the definition of the
dictionaries for the different experiments during the early stages of the
repository. After the definition of said dictionaries is settled, README
templates are to be created and new contributions should follow that
guidance.
This function is merely kept for completeness and might be useful
if new types of experiments are included in the repo.
:param md_lines: list of strings of the README-file content for a desired
experiment
:return: dictionary with the different markdown items
"""
# Initialise dictionary to collect the different README items.
exp_data_info = dict()
recent_main_key = None
# Read bullet points of markdown list and transform them
# to dictionary keys.
for line in md_lines:
# Get medium items.
if "* " in line and ": " in line:
new_key = line[2:].split(':')[0].replace(' ', '_').lower()
new_info = line[4:].split(':')[1]
exp_data_info[new_key] = dict()
# print(line)
# Get major items.
elif "* " in line and not ": " in line:
new_key = line[2:].replace(' ', '_').lower()
recent_main_key = new_key
exp_data_info[new_key] = dict()
# print(line)
# Get minor items.
elif " - " in line and ": " in line:
new_key = line[4:].split(':')[0].replace(' ', '_').lower()
new_info = line[4:].split(':')[1]
# print(new_info)
# Add a dictionary to store the item info.
exp_data_info[recent_main_key][new_key] = dict()
elif " - " in line and not ": " in line:
print(' * ERROR - check README layout! * ')
else:
# Catch cases that aren't expected keywords, e.g. empty lines.
new_key = None
# Select for expected keywords.
if new_key is not None:
if "heating_rate" in new_key:
rx.findall(line)
print(rx.findall(line))
print(new_info)
return exp_data_info
def get_tga_items(md_lines, items):
"""
Takes a list of markdown lines (string) of a desired experiment,
e.g. TGA, and a dictionary of the expected bullet points. It parses the
lines and extracts the information of the bullet points and stores them
in the dictionary. Bullet points are distinguished between major, medium
and minor. Major points are understood as some kind of heading that ties
multiple minor points together, e.g. description of crucibles. Medium
points stand on their own and only provide a single piece of
information, e.g. sample mass.
:param md_lines: list of strings (markdown file lines)
:param items: dictionary with expected bullet points as keys.
:return: Nothing; the existing dictionary is simply filled with the
appropriate values for the keys.
"""
# Read bullet points of markdown list and transform them
# to dictionary keys.
for line in md_lines:
# Get medium items.
if "* " in line and ": " in line:
new_key = line[2:].split(':')[0].replace(' ', '_').lower()
new_info = line[4:].split(':')[1]
# print(line)
# Get major items.
elif "* " in line and not ": " in line:
new_key = line[2:].replace(' ', '_').lower()
recent_main_key = new_key
# print(line)
# Get minor items.
elif " - " in line and ": " in line:
new_key = line[4:].split(':')[0].replace(' ', '_').lower()
new_info = line[4:].split(':')[1]
# print(new_info)
else:
# Catch cases that aren't expected keywords, e.g. empty lines.
new_key = None
if new_key is not None:
if "heating_rate" in new_key:
# Determine how many heating rates were used,
# create a new dictionary for each. #TODO
# Get all heating rates as list.
heating_rates = rx.findall(line)
# Get unit.
heating_rate_unit = line[4:].split(' ')[-1]
# print('hr unit', heating_rate_unit)
# for heating_rate in rx.findall(line):
# print(heating_rate)
# print(rx.findall(line))
# print(new_info)
if new_key is not None:
# # Set the heating rate.
# new_val = 10
# new_unit = "K/min"
# items["heating_rate"] = {'value': new_val,
# 'unit': new_unit}
if "initial_temperature" in new_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "initial_isotherm" in new_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "maximum_temperature" in new_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "final_isotherm" in new_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "sample_mass" in new_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "sample_geometry" in new_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "calibration_type" in new_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "type" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "volume" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "diameter" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "mass" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "lid" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "note" in new_key and "crucible" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "type" in new_key and "carrier_gas" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "flow_rate" in new_key and "carrier_gas" in recent_main_key:
if "None" not in new_info:
new_val = new_info.split(" ")[-2]
new_unit = new_info.split(" ")[-1]
else:
new_val = None
new_unit = None
items[recent_main_key][new_key] = {'value': new_val,
'unit': new_unit}
# print(new_info.split(" "), recent_main_key)
elif "note" in new_key and "carrier_gas" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "type" in new_key and "instrument" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
# print(new_info.split(" "), recent_main_key)
elif "note" in new_key and "instrument" in recent_main_key:
if "None" not in new_info:
new_val = new_info[1:]
else:
new_val = None
items[recent_main_key][new_key] = new_val
def build_major_bullet_point(exp_dict, bullet_point):
"""
This function takes a desired major bullet point and its minor bullet
points from an experiment description dictionary and translates them into
a list of strings. These strings are markdown items and can be written to a
text file to be human-readable.
:param exp_dict: dictionary containing the experiment description
:param bullet_point: key (string) for the desired major bullet point
:return: list of string
"""
# Define string nuclei to build README lines.
major_nucleus = "* {}"
minor_nucleus = " - {}: {}"
# Initialise collection of README lines as list of string.
new_lines = list()
# Define major bullet point (heading).
major_bullet = bullet_point.replace('_', ' ').title()
major_bullet = major_nucleus.format(major_bullet)
new_lines.append(major_bullet)
# Define minor bullet points.
for key in exp_dict[bullet_point].keys():
if type(exp_dict[bullet_point][key]) is str:
# Get bullet points that only hold a string, e.g. a note.
value = exp_dict[bullet_point][key]
elif exp_dict[bullet_point][key] is None:
# Get bullet points that contain no information, i.e. None.
value = "None"
else:
if key == 'note':
# Get the text of the note.
value = exp_dict[bullet_point][key]
elif exp_dict[bullet_point][key]['value'] is not None:
# Concatenate value and measurement unit.
value = "{} {}".format(exp_dict[bullet_point][key]['value'],
exp_dict[bullet_point][key]['unit'])
else:
# Write only a single "None", instead one for the value
# and one for the measurement unit.
value = "None"
# Construct and collect bullet point.
new_bullet_point = key.replace('_', ' ').title()
new_line = minor_nucleus.format(new_bullet_point, value)
new_lines.append(new_line)
return new_lines
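# Illustrative output for a hypothetical "crucible" entry:
# ["* Crucible", " - Type: Alumina", " - Volume: 85 uL"]
# i.e. one major bullet line plus one "key: value unit" line per minor item.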
def build_medium_bullet_point(exp_dict, bullet_point):
"""
This function takes a desired medium bullet point from an
experiment description dictionary and translates it into a markdown item
(returned as a single-element list of string) that can be written to a
text file to be human-readable.
:param exp_dict: dictionary containing the experiment description
:param bullet_point: key (string) for the desired medium bullet point
:return: list of string
"""
# Define string nucleus to build README lines.
medium_nucleus = "* {}: {}"
value_unit_nucleus = "{} {}"
# Read the content of the bullet point.
exp_content = exp_dict[bullet_point]
# Initialise collection of README lines as list of string.
new_lines = list()
# Check if the content is a value with a unit or a description.
if type(exp_content) is dict:
value = exp_content["value"]
unit = exp_content["unit"]
new_bullet_content = value_unit_nucleus.format(value, unit)
else:
new_bullet_content = exp_dict[bullet_point]
# Define medium bullet point (heading).
new_bullet_point = bullet_point.replace('_', ' ').title()
new_line = medium_nucleus.format(new_bullet_point, new_bullet_content)
new_lines.append(new_line)
return new_lines
def build_test_condition_table(test_condition_table_df):
"""
Function to create a markdown table from a Pandas DataFrame.
:param test_condition_table_df: DataFrame to be translated
:return: list of string
"""
# Initialise collection of README lines as list of string.
new_lines = list()
# Define string nucleus to build markdown table lines.
table_line_nucleus = "|"
table_entry_nucleus = "{}|"
# Get column headers.
column_headers = list(test_condition_table_df)
n_columns = len(column_headers)
# Build table header.
table_line = "" + table_line_nucleus
for column_header in column_headers:
table_line += table_entry_nucleus.format(" " + column_header + " ")
new_lines.append(table_line)
# Build table divider.
table_line = "" + table_line_nucleus
for n_column in range(n_columns):
table_line += table_entry_nucleus.format(":---:")
new_lines.append(table_line)
# Build table body.
n_rows = len(test_condition_table_df[column_headers[0]])
# Iterate over lines.
for line in range(n_rows):
table_line = "" + table_line_nucleus
# Iterate over columns.
for column_header in column_headers:
entry = str(test_condition_table_df.iloc[line][column_header])
# entry = entry.replace("_", "\\_")
table_line += table_entry_nucleus.format(" " + entry + " ")
new_lines.append(table_line)
return new_lines
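# Illustrative output for a one-row, two-column DataFrame:
# ["| Test Label | Heating Rate (K/min) |", "|:---:|:---:|", "| R1 | 10.0 |"]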
def build_tga(tga_exp, exp_table_df=None):
"""
This function builds the README lines for the TGA experiments. Heating
rate and sample masses are summarised.
:param tga_exp: dictionary, containing the description of the different
repetitions of the TGA experiments
:param exp_table_df: Pandas DataFrame containing the
test condition summary table
:return: list of string for a new README file
"""
# Initialise collection of README lines as list of string.
tga_readme_lines = list()
# Define string nuclei to build README lines.
exp_header = "### Experimental Conditions, TGA"
tga_readme_lines.append(exp_header)
# Get keys of the different experiments.
exp_keys = list(tga_exp.keys())
# Heating Rates.
heating_rates = list()
# Get all heating rates.
for exp_key in exp_keys:
heating_rates.append(tga_exp[exp_key]["heating_rate"]["value"])
# Remove duplicates.
heating_rates = list(dict.fromkeys(heating_rates))
# Build the summary of different heating rates.
part_one = "{}".format(heating_rates[0])
for heating_rate in heating_rates[1:-1]:
part_one += ", {}".format(heating_rate)
part_two = heating_rates[-1]
unit = tga_exp[exp_key]["heating_rate"]["unit"]
readme_lines = "* Heating Rates: {} and {} {}".format(part_one,
part_two,
unit)
tga_readme_lines.append(readme_lines)
# Temperature program.
readme_lines = build_major_bullet_point(tga_exp[exp_keys[0]],
"temperature_program")
tga_readme_lines.append(readme_lines)
# Sample mass.
sample_masses = list()
# Get all sample masses.
for exp_key in exp_keys:
sample_masses.append(tga_exp[exp_key]["sample_mass"]["value"])
# Remove duplicates.
sample_masses = list(dict.fromkeys(sample_masses))
# Build the summary of different sample masses.
unit = tga_exp[exp_key]["sample_mass"]["unit"]
readme_lines = "* Sample Mass: {} - {} {}".format(min(sample_masses),
max(sample_masses),
unit)
tga_readme_lines.append(readme_lines)
# Sample geometry.
readme_lines = build_medium_bullet_point(tga_exp[exp_keys[0]],
"sample_geometry")
tga_readme_lines.append(readme_lines)
# Calibration type
readme_lines = build_medium_bullet_point(tga_exp[exp_keys[0]],
"calibration_type")
tga_readme_lines.append(readme_lines)
# Crucible
readme_lines = build_major_bullet_point(tga_exp[exp_keys[0]],
"crucible")
tga_readme_lines.append(readme_lines)
# Carrier Gas
readme_lines = build_major_bullet_point(tga_exp[exp_keys[0]],
"carrier_gas")
tga_readme_lines.append(readme_lines)
# Instrument
readme_lines = build_major_bullet_point(tga_exp[exp_keys[0]],
"instrument")
tga_readme_lines.append(readme_lines)
# Test condition summary table
if exp_table_df is not None:
table_header = "###### Test Condition Summary"
tga_readme_lines.append(table_header)
table_lines = build_test_condition_table(exp_table_df)
tga_readme_lines.append(table_lines)
# Flatten list of new README lines.
tga_readme_lines = list( | pd_flatten(tga_readme_lines) | pandas.core.common.flatten |
# coding: utf-8
# # VISIONS'18: Tucker trawl 2018-07-21
# Cruise number: RR1812 (R/V <NAME>)
#
# This notebook shows an estimation of where the time-depth trajectory of the Tucker trawl tow on 2018-07-21 was with respect to the animals in the water column (observed through ADCP).
# ## Loading ADCP raw beam data
# First let's load in some libraries we will need to read and plot the data.
# In[1]:
import os, re, glob
import numpy as np
import matplotlib.pyplot as plt
import datetime
import arlpy # ARL underwater acoustics toolbox
from mpl_toolkits.axes_grid1 import make_axes_locatable
# sys.path.append('/Users/wujung/adcpcode/programs')
from pycurrents.adcp.rdiraw import Multiread
import adcp_func
# Find out what are the available ADCP raw files.
# In[2]:
# Set up paths and params
pname_150 = '/Volumes/current_cruise/adcp/RR1812/raw/os150/'
fname_150 = glob.glob(pname_150+'rr2018_202*.raw')
fname_150.sort() # sort filename
fname_150
# It's a bit of guesswork to figure out which files contain the section recorded during the net tow.
#
# We know the last number string in the filename is the number of seconds since 00:00 of the day. The net tow was in water around 03:26 UTC time = 12360 secs. This means files `rr2018_202_07200.raw` and `rr2018_202_14400.raw` should cover the section of the net tow.
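# (03:26 UTC corresponds to 3*3600 + 26*60 = 12360 s after midnight; the tow ended around 04:13 UTC = 15180 s, so together the 7200 s and 14400 s files span the whole tow.)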
#
# Let's give it a try!
# In[3]:
m_150,data_150,param_150 = adcp_func.load_raw_files([pname_150+'rr2018_202_07200.raw',pname_150+'rr2018_202_14400.raw'])
# Next we grab the time stamp from the ADCP raw data stream.
# In[216]:
# set up x-axis (time stamp) for ADCP data
ping_jump_150 = int(np.floor(data_150.dday.shape[0]/8))
ping_num_150 = np.arange(0,data_150.amp1.shape[0],ping_jump_150)
time_str_150 = [str('%02d'%data_150.rVL['Hour'][x])+':'+str('%02d'%data_150.rVL['Minute'][x]) for x in ping_num_150]
# Let's plot and check if the data make sense.
# In[217]:
val_mtx = data_150.amp1-param_150['absorption']-2*param_150['spreading_loss']
actual_depth_bin = np.round(param_150['range'],2)
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(1,1,1)
im = ax.imshow(val_mtx.T,aspect='auto',interpolation='none', extent=[0,val_mtx.shape[0],actual_depth_bin[-1],actual_depth_bin[0]], vmin=160, vmax=260)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="1%", pad=0.05)
cbar = plt.colorbar(im,cax=cax)
cbar.ax.tick_params(labelsize=12)
ax.set_xticks(ping_num_150)
ax.set_xticklabels(time_str_150,fontsize=12)
ax.set_xlabel('UTC Time (hr:min)',fontsize=14)
ax.set_yticklabels(np.arange(0,400,50),fontsize=12)
ax.set_ylabel('Depth (m)',fontsize=14)
ax.set_ylim([350,0])
ax.set_title('ADCP 150 kHz "echogram"',fontsize=14)
plt.show()
# We can see a strong diel vertical migration (DVM) signal starting around 04:00 UTC time, which is about 19:00 local time, so the ADCP echogram makes sense. The Tucker trawl was in water during 03:26-04:13 UTC time, right around when the DVM happened.
# ## Loading net time-depth trajectory
# Let's now try putting the net tow time-depth trajectory onto the echogram to see which layers we actually sampled.
# In[218]:
import pandas as pd
from pytz import common_timezones
# In[219]:
csv_pname = '/Volumes/Transcend/Dropbox/Z_wjlee/20180719_ooi_cruise/net_tow/'
csv_fname = '20180721_EAO600m_tow.csv'
# In[220]:
net = pd.read_csv(csv_pname+csv_fname, names=['Index','Device_ID','File_ID', 'year','month','day','hour','minute','second', 'Offset','Pressure','Temperature'])
# In[221]:
net['second'] = net['Offset']
# ## Plotting net time-depth trajectory on ADCP echogram
# Now we mess around with the timestamps from the ADCP and the time-depth sensor on the net. The goal is to plot the time-depth trajectory directly on the ADCP echogram.
# First we create a `datetime` string for the time-depth sensor on the net.
# In[222]:
net_timestamp = pd.to_datetime(net.loc[:, 'year':'second'])
net_timestamp = net_timestamp.dt.tz_localize('US/Pacific').dt.tz_convert('UTC') # convert from Pacific to UTC
# In[223]:
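# Convert the sensor's absolute pressure (mbar) to depth in metres of water:
# subtract standard atmospheric pressure (1013.25 mbar) and scale by ~0.0102 m per mbar.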
net_depth = | pd.Series((net['Pressure']-1013.25)*0.010197442889221,name='depth') | pandas.Series |
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import sklearn.metrics as skm
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn import tree
import pandas as pd
import numpy as np
import pydotplus
import joblib
import time
import re
import os
import pickle
from numpy import argmax
import functions as func
from keras import backend as Kb
from keras.utils import to_categorical
import gc
from imblearn.pipeline import Pipeline
from sklearn.metrics import make_scorer
from sklearn.metrics import fbeta_score
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler
def R2_measure(y_true, y_pred):
return r2_score(y_true, y_pred)
def f2_measure(y_true, y_pred):
return fbeta_score(y_true, y_pred, labels=[1, 2], beta=2, average='micro')
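# These wrappers are intended for scikit-learn's scoring interface; a hypothetical
# usage sketch (not part of the pipeline below) would be:
# f2_scorer = make_scorer(f2_measure)
# r2_scorer = make_scorer(R2_measure)
# and the scorers could then be passed as `scoring=` to GridSearchCV or cross_val_score.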
def main():
models = ['NN'] # 'LSTM', 'NN', 'LR', 'RF', 'DT', 'SVC',
# 'DOcategory', 'pHcategory','ph', 'dissolved_oxygen',
targets = ['pHcategory']
sondefilename = 'leavon_wo_2019-07-01-2020-01-15'
n_job = -1
for model_name in models:
print(model_name)
for target in targets:
if target.find('category') > 0:
cat = 1
directory = 'Results/bookOne/output_Cat_' + model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta'}
else:
cat = 0
directory = 'Results/bookOne/output_Reg_' + model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mpe': 'mpe', 'rmse': 'rmse', 'R2': 'R2'}
if not os.path.exists(directory):
os.makedirs(directory)
directoryresult = directory + 'Results/'
if not os.path.exists(directoryresult):
os.makedirs(directoryresult)
resultFileName = 'results_'+target+str(time.time())+'.csv'
dfheader = pd.DataFrame(data=data, index=[0])
dfheader.to_csv(directoryresult+resultFileName,
index=False, header=False)
path = 'Sondes_data/train/train_data/'
testpath = 'Sondes_data/test/test_data/'
method = 'OrgData'
for PrH_index in [1, 3, 6, 12, 24, 36, 48]:
params = func.trained_param_grid[
'param_grid_'+model_name+str(cat)]
lags = func.getlags_window(
model_name, params['param_'+target+'_'+str(PrH_index)], cat)
files = [f for f in os.listdir(path) if f.endswith(
'.csv') and f.startswith(sondefilename)]
file1 = files[0]
print(' TH: ' +
str(PrH_index)+' '+method+' '+target+' '+file1)
dataset = pd.read_csv(path+file1)
train_X_grid, train_y_grid, input_dim, features = func.preparedata(
dataset, PrH_index, lags, target, cat)
print(input_dim)
if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
train_y_grid = to_categorical(train_y_grid, 3)
start_time = time.time()
mo = func.getModel(
model_name, input_dim, params['param_'+target+'_'+str(PrH_index)], n_job, cat)
if model_name == 'RF' or model_name == 'DT':
pipeline = Pipeline(steps=[('model', mo)])
else:
pipeline = Pipeline(
steps=[('n', StandardScaler()), ('model', mo)])
# save the model to disk
filename = model_name+'_model_' + \
target+'_'+str(PrH_index)+'.sav'
if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
clf = pipeline.fit(train_X_grid, train_y_grid, model__class_weight={
0: 1, 1: 50, 2: 100})
else:
clf = pipeline.fit(train_X_grid, train_y_grid)
# joblib.dump(clf, directory+filename)
pickle.dump(clf, open(directory+filename, 'wb'))
# To load the model, open the file in reading and binary mode
# load_lr_model =pickle.load(open(filename, 'rb'))
elapsed_time = time.time() - start_time
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
#################################
# Testing final model on test data
#################################
start_time = time.time()
testsondefilename = re.sub('wo_', '', sondefilename)
files = [f for f in os.listdir(testpath) if f.endswith(
'.csv')and f.startswith(testsondefilename)]
file1 = files[0]
print('Window: '+str(lags) + ' TH: ' +
str(PrH_index)+' '+method+' '+target+file1)
dataset = pd.read_csv(testpath+file1)
test_X_grid, test_y_grid, input_dim, features = func.preparedata(
dataset, PrH_index, lags, target, cat)
if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
test_y_grid = to_categorical(test_y_grid, 3)
i = 1
custom_cv = func.custom_cv_kfolds_testdataonly(
test_X_grid, 100)
for test_index in custom_cv:
test_X = test_X_grid[test_index]
test_y = test_y_grid[test_index]
predictions = clf.predict(test_X)
if model_name == 'LSTM' or model_name == 'NN':
test_y = argmax(test_y, axis=1)
# predictions = argmax(predictions, axis=1)
if cat == 1:
predictions = np.array(predictions).astype(int)
test_y = np.array(test_y).astype(int)
test_y = test_y.reshape(len(test_y),)
predictions = predictions.reshape(len(predictions),)
if i % 10 == 0:
plt.scatter(np.arange(len(test_y)),
test_y, s=1)
plt.scatter(np.arange(len(predictions)),
predictions, s=1)
plt.legend(['actual', 'predictions'],
loc='upper right')
fpath = filename + '_CV'+str(i) + file1
# 'predictions_' + method+target+'_Window' + str(lags) + '_TH'+str(PrH_index) + \'_CV' + str(i)+file1
plt.savefig(directoryresult+fpath+'.jpg')
plt.close()
data = {'Actual': test_y, 'Predictions': predictions}
print(test_y.shape)
print(predictions.shape)
df = pd.DataFrame(data=data)
df.to_csv(directoryresult+filename +
'_CV'+str(i) + file1, index=False)
cm0 = func.forecast_accuracy(predictions, test_y, cat)
if cat == 1:
data = {'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'CV': i,
'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}
elif cat == 0:
data = {'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'CV': i,
'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}
df = | pd.DataFrame(data=data, index=[0]) | pandas.DataFrame |
from __future__ import division, print_function, absolute_import
import os
import traceback
import scipy.misc as misc
import matplotlib.pyplot as plt
import numpy as np
import glob
import pandas as pd
import random
from PIL import Image, ImageOps
def get_data_A1A4(data_path, split_load):
# Getting images (x data)
imgname_train_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(h)+'/*.png') for h in split_load[0]])
imgname_train_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(h)+'/*.png') for h in split_load[0]])
imgname_val_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[1])+'/*.png')])
imgname_val_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[1])+'/*.png')])
imgname_test_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1'+str(split_load[2])+'/*.png')])
imgname_test_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4'+str(split_load[2])+'/*.png')])
filelist_train_A1 = list(np.sort(imgname_train_A1.flat)[1::2])
filelist_train_A4 = list(np.sort(imgname_train_A4.flat)[1::2])
filelist_train_A1_fg = list(np.sort(imgname_train_A1.flat)[0::2])
filelist_train_A4_fg = list(np.sort(imgname_train_A4.flat)[0::2])
filelist_train_A1_img = np.array([np.array(filelist_train_A1[h][-16:]) for h in range(0,len(filelist_train_A1))])
filelist_train_A4_img = np.array([np.array(filelist_train_A4[h][-17:]) for h in range(0,len(filelist_train_A4))])
filelist_train_A1_set = np.array([np.array(filelist_train_A1[h][-20:-18]) for h in range(0,len(filelist_train_A1))])
filelist_train_A4_set = np.array([np.array(filelist_train_A4[h][-20:-18]) for h in range(0,len(filelist_train_A4))])
filelist_val_A1 = list(np.sort(imgname_val_A1.flat)[1::2])
filelist_val_A4 = list(np.sort(imgname_val_A4.flat)[1::2])
filelist_val_A1_fg = list(np.sort(imgname_val_A1.flat)[0::2])
filelist_val_A4_fg = list(np.sort(imgname_val_A4.flat)[0::2])
filelist_val_A1_img = np.array([np.array(filelist_val_A1[h][-16:]) for h in range(0,len(filelist_val_A1))])
filelist_val_A4_img = np.array([np.array(filelist_val_A4[h][-17:]) for h in range(0,len(filelist_val_A4))])
filelist_val_A1_set = np.array([np.array(filelist_val_A1[h][-20:-18]) for h in range(0,len(filelist_val_A1))])
filelist_val_A4_set = np.array([np.array(filelist_val_A4[h][-20:-18]) for h in range(0,len(filelist_val_A4))])
filelist_test_A1 = list(np.sort(imgname_test_A1.flat)[1::2])
filelist_test_A4 = list(np.sort(imgname_test_A4.flat)[1::2])
filelist_test_A1_fg = list(np.sort(imgname_test_A1.flat)[0::2])
filelist_test_A4_fg = list(np.sort(imgname_test_A4.flat)[0::2])
filelist_test_A1_img = np.array([np.array(filelist_test_A1[h][-16:]) for h in range(0,len(filelist_test_A1))])
filelist_test_A4_img = np.array([np.array(filelist_test_A4[h][-17:]) for h in range(0,len(filelist_test_A4))])
filelist_test_A1_set = np.array([np.array(filelist_test_A1[h][-20:-18]) for h in range(0,len(filelist_test_A1))])
filelist_test_A4_set = np.array([np.array(filelist_test_A4[h][-20:-18]) for h in range(0,len(filelist_test_A4))])
x_train_A1 = np.array([np.array(Image.open(fname)) for fname in filelist_train_A1])
x_train_A1 = np.delete(x_train_A1,3,3)
x_train_A4 = np.array([np.array(Image.open(fname)) for fname in filelist_train_A4])
x_train_A1_fg = np.array([np.array(Image.open(fname)) for fname in filelist_train_A1_fg])
x_train_A4_fg = np.array([np.array(Image.open(fname)) for fname in filelist_train_A4_fg])
x_val_A1 = np.array([np.array(Image.open(fname)) for fname in filelist_val_A1])
x_val_A1 = np.delete(x_val_A1,3,3)
x_val_A4 = np.array([np.array(Image.open(fname)) for fname in filelist_val_A4])
x_val_A1_fg = np.array([np.array(Image.open(fname)) for fname in filelist_val_A1_fg])
x_val_A4_fg = np.array([np.array(Image.open(fname)) for fname in filelist_val_A4_fg])
x_test_A1 = np.array([np.array(Image.open(fname)) for fname in filelist_test_A1])
x_test_A1 = np.delete(x_test_A1,3,3)
x_test_A4 = np.array([np.array(Image.open(fname)) for fname in filelist_test_A4])
x_test_A1_fg = np.array([np.array(Image.open(fname)) for fname in filelist_test_A1_fg])
x_test_A4_fg = np.array([np.array(Image.open(fname)) for fname in filelist_test_A4_fg])
x_train_res_A1 = np.array([misc.imresize(x_train_A1[i],[317,309,3]) for i in range(0,len(x_train_A1))])
x_train_res_A4 = np.array([misc.imresize(x_train_A4[i],[317,309,3]) for i in range(0,len(x_train_A4))])
x_val_res_A1 = np.array([misc.imresize(x_val_A1[i],[317,309,3]) for i in range(0,len(x_val_A1))])
x_val_res_A4 = np.array([misc.imresize(x_val_A4[i],[317,309,3]) for i in range(0,len(x_val_A4))])
x_test_res_A1 = np.array([misc.imresize(x_test_A1[i],[317,309,3]) for i in range(0,len(x_test_A1))])
x_test_res_A4 = np.array([misc.imresize(x_test_A4[i],[317,309,3]) for i in range(0,len(x_test_A4))])
x_train_res_A1_fg = np.array([misc.imresize(x_train_A1_fg[i],[317,309,3]) for i in range(0,len(x_train_A1_fg))])
x_train_res_A4_fg = np.array([misc.imresize(x_train_A4_fg[i],[317,309,3]) for i in range(0,len(x_train_A4_fg))])
x_val_res_A1_fg = np.array([misc.imresize(x_val_A1_fg[i],[317,309,3]) for i in range(0,len(x_val_A1))])
x_val_res_A4_fg = np.array([misc.imresize(x_val_A4_fg[i],[317,309,3]) for i in range(0,len(x_val_A4))])
x_test_res_A1_fg = np.array([misc.imresize(x_test_A1_fg[i],[317,309,3]) for i in range(0,len(x_test_A1_fg))])
x_test_res_A4_fg = np.array([misc.imresize(x_test_A4_fg[i],[317,309,3]) for i in range(0,len(x_test_A4_fg))])
x_train_all = np.concatenate((x_train_res_A1, x_train_res_A4), axis=0)
x_val_all = np.concatenate((x_val_res_A1, x_val_res_A4), axis=0)
x_test_all = np.concatenate((x_test_res_A1, x_test_res_A4), axis=0)
for h in range(0,len(x_train_all)):
x_img = x_train_all[h]
x_img_pil = Image.fromarray(x_img)
x_img_pil = ImageOps.autocontrast(x_img_pil)
x_img_ar = np.array(x_img_pil)
x_train_all[h] = x_img_ar
for h in range(0,len(x_val_all)):
x_img = x_val_all[h]
x_img_pil = Image.fromarray(x_img)
x_img_pil = ImageOps.autocontrast(x_img_pil)
x_img_ar = np.array(x_img_pil)
x_val_all[h] = x_img_ar
for h in range(0,len(x_test_all)):
x_img = x_test_all[h]
x_img_pil = Image.fromarray(x_img)
x_img_pil = ImageOps.autocontrast(x_img_pil)
x_img_ar = np.array(x_img_pil)
x_test_all[h] = x_img_ar
x_train_all_fg = np.concatenate((x_train_res_A1_fg, x_train_res_A4_fg), axis=0)
x_val_all_fg = np.concatenate((x_val_res_A1_fg, x_val_res_A4_fg), axis=0)
x_test_all_fg = np.concatenate((x_test_res_A1_fg, x_test_res_A4_fg), axis=0)
sum_train_all = np.zeros((len(x_train_all_fg),1))
sum_val_all = np.zeros((len(x_val_all_fg),1))
sum_test_all = np.zeros((len(x_test_all_fg),1))
for i in range(0, len(x_train_all_fg)):
x_train_all_fg[i][x_train_all_fg[i] > 0] = 1
sum_train_all[i] = np.sum(x_train_all_fg[i])
for i in range(0, len(x_val_all_fg)):
x_val_all_fg[i][x_val_all_fg[i] > 0] = 1
sum_val_all[i] = np.sum(x_val_all_fg[i])
for i in range(0, len(x_test_all_fg)):
x_test_all_fg[i][x_test_all_fg[i] > 0] = 1
sum_test_all[i] = np.sum(x_test_all_fg[i])
x_train_img = np.concatenate((filelist_train_A1_img, filelist_train_A4_img), axis=0)
x_val_img = np.concatenate((filelist_val_A1_img, filelist_val_A4_img), axis=0)
x_test_img = np.concatenate((filelist_test_A1_img, filelist_test_A4_img), axis=0)
x_train_set = np.concatenate((filelist_train_A1_set, filelist_train_A4_set), axis=0)
x_val_set = np.concatenate((filelist_val_A1_set, filelist_val_A4_set), axis=0)
x_test_set = np.concatenate((filelist_test_A1_set, filelist_test_A4_set), axis=0)
# Getting targets (y data) #
counts_A1 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A1.xlsx')])
counts_A4 = np.array([glob.glob(data_path+'/CVPPP2017_LCC_training/TrainingSplits/A4.xlsx')])
counts_train_flat_A1 = list(counts_A1.flat)
train_labels_A1 = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import bin
"""
Blue: #0C5DA5
Green: #00B945
"""
plt.style.use(['science', 'ieee', 'std-colors'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
sciblue = '#0C5DA5'
scigreen = '#00B945'
# ----------------------------------------------------------------------------------------------------------------------
"""
NOTE: There are two parts to this analysis:
A. Calculate the mean rmse_z by grouping dataframes.
B. Bin and plot rmse_z by dx.
"""
# ----------------------------------------------------------------------------------------------------------------------
# PART A.
# filepaths
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/figure data/grid-overlap/'
path_read = base_dir + 'results/test-coords/'
path_figs = base_dir + 'figs/'
path_results = base_dir + 'results/average/'
fp1 = path_read + 'test_id1_coords_static_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp2 = path_read + 'test_id2_coords_static_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp3 = path_read + 'test_id11_coords_SPC_grid-overlap-random-z-nl1_percent_overlap.xlsx'
fp4 = path_read + 'test_id12_coords_SPC_grid-overlap-random-z-nl1_percent_overlap.xlsx'
df1 = pd.read_excel(fp1)
df2 = pd.read_excel(fp2)
df3 = pd.read_excel(fp3)
df4 = pd.read_excel(fp4)
# concat IDPT and SPCT dataframes
dfi = pd.concat([df1, df2], ignore_index=True)
dfs = | pd.concat([df3, df4], ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
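# Example usage sketch (hypothetical DataFrame name):
# df_corr = calculate_pearson(ideb_df)
# gives one row per numeric feature (except 'cod_municipio') with its Pearson
# correlation against the 'ideb' column.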
# In[3]:
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Pull data from each group member's CSV
# ### Alexandre's data
# In[4]:
path = '../../data/'
# In[5]:
# Early years (anos iniciais) data
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv')
# Final years (anos finais) data
alexandre_final_2015 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2015_af.csv')
alexandre_final_2017 = | pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2017_af.csv') | pandas.read_csv |
import sys
import os
import pandas
import pandas as pd
import re
import yfinance as yf
import datetime
from api.model import Securities, Positions
class TSF:
"""
Trade Station transaction fields
"""
symbol = "Symbol"
symbol_db = "symbol"
principal = "Principal"
quantity = "Qty"
commission = "Comm"
purchase_price = "Price"
dropped = ["Date", "execution_date", "cusip", "Cusip", "Order Id", "institute_order_id", "Net Amt", purchase_price,
"unit_price"]
summation = [quantity, principal, commission]
buy_sell = "side"
dropped_db = ["execution_date", "cusip", "institute_order_id", "unit_price", "created_at", "transaction_id",
"account_id", "execution_date"]
summation_db = ["commission", "quantity", "other_fees", "principal"]
class PF(TSF):
"""
Portfolio fields
"""
tranches = "Tranches"
current_price = "Closing Price"
position_cache_flow = "Position Cache Flow"
position_cache_flow_percent = "Position Cache Flow %"
position_market_value = "Position Market Value"
position_allocation_percent = "Position Allocation %"
position_yoc = "Position YOC"
forward_div_rate = "Forward Div Rate"
summary = [TSF.quantity, TSF.principal, position_cache_flow, TSF.commission, position_market_value]
yahoo_field_map = {
"Sector": "sector",
"Name": "shortName",
PF.current_price: "previousClose", # regularMarketPreviousClose
PF.forward_div_rate: "dividendRate",
"Forward Div Yield": "dividendYield",
"Forward PE": "forwardPE",
"Forward EPS": "forwardEps",
"Price To Book": "priceToBook",
"Payout Ratio": "payoutRatio",
"Ex-Div Date": "exDividendDate",
"Market Cap": "marketCap",
}
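# The values above are keys of the info dict returned by yfinance; an illustrative
# sketch (the ticker symbol is only an example, real symbols come from the transactions):
# info = yf.Ticker("MSFT").info
# last_close = info.get("previousClose")
# div_rate = info.get("dividendRate")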
class PortfolioBuilder:
def __init__(self):
self.portfolio = None
self.transactions = None
def load_xlsx_transactions(self, transactions_xlsx):
# Import the Sample worksheet with acquisition dates and initial cost basis:
print("Reading Transaction ...")
self.transactions = | pd.read_excel(transactions_xlsx, sheet_name='Sheet1') | pandas.read_excel |
from datetime import datetime
import urllib.request
import pandas as pd
import zipfile
import requests
import plotly
import plotly.graph_objects as go
import folium
from branca.element import Template, MacroElement
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
#Plot 1 and 2 start
## Get EPRACCUR data from NHSD
url = 'https://files.digital.nhs.uk/assets/ods/current/epraccur.zip'
filehandle, _ = urllib.request.urlretrieve(url)
zip_file_object = zipfile.ZipFile(filehandle, 'r')
first_file = zip_file_object.namelist()[0]
file = zip_file_object.open(first_file)
content = file.read()
csv_file = open('assets/data/epraccur_data.csv', 'wb')
csv_file.write(content)
csv_file.close()
header_list = ["Organisation Code", "Name", "National Grouping", "High Level Health Geography", "Address line 1", "Address line 2", "Address line 3",
"Address line 4", "Address line 5","Postcode","Open Date","Close Date","Status Code","Organisation Sub-Type Code","Commissioner","Join Provider/Purchaser Date",
"Left Provider/Purchaser Date","Contact Telephone Number", "Null 1", "Null 2", "Null 3", "Amended Record Indicator", "Null 4", "Provider/Purchaser",
"Null 5", "Prescribing Setting", "Null 6"]
## Get EPRACCUR data from NHSD end
##EPRACCUR data processing
gp_practice_df = pd.read_csv('assets/data/epraccur_data.csv', names=header_list)
gp_practice_df.fillna('', inplace=True)
gp_practice_df['Partial Address'] = gp_practice_df[['Address line 1', 'Address line 2', 'Address line 3', 'Address line 4',]].agg(', '.join, axis=1)
gp_practice_df['Full Address'] = gp_practice_df[['Partial Address', 'Address line 5',]].agg(' '.join, axis=1)
gp_practice_df['Full Address'] = gp_practice_df['Full Address'].str.title()
gp_practice_df['Name'] = gp_practice_df['Name'].str.title()
gp_practice_df_1 = gp_practice_df.drop(columns = {"High Level Health Geography", "Address line 1", "Address line 2", "Address line 3", "Address line 4",
"Address line 5", "Open Date", "Close Date", "Organisation Sub-Type Code", "Commissioner", "Join Provider/Purchaser Date", "Left Provider/Purchaser Date",
"Null 1", "Null 2", "Null 3", "Amended Record Indicator", "Null 4", "Partial Address", "Provider/Purchaser", "Null 5", "Null 6"})
gp_practice_df_2 = gp_practice_df_1[gp_practice_df_1["Status Code"] == "A"]
gp_practice_df_3 = gp_practice_df_2[gp_practice_df_2["Prescribing Setting"] == 4]
gp_practice_df_eng = gp_practice_df_3[gp_practice_df_3["National Grouping"].str.contains("YAC|YAD|YAE|YAF|W00")==False]
gp_practice_df_eng_1 = gp_practice_df_eng.reset_index(drop = True)
gp_practice_df_eng_2 = gp_practice_df_eng_1.copy()
gp_practice_df_eng_3 = gp_practice_df_eng_2.drop( columns = {"Status Code", "Prescribing Setting"})
gp_practice_df_ldn = gp_practice_df_eng_3[gp_practice_df_eng_3["National Grouping"].str.contains("Y56")==True]
gp_practice_df_ldn['Name'] = gp_practice_df_ldn['Name'].str.replace('Gp', 'GP')
gp_practice_df_ldn['Full Address'] = gp_practice_df_ldn['Full Address'].str.replace(' ,', ' ').str.replace(' ', ' ').str.replace('Gp', 'GP').map(lambda x: x.rstrip(', '))
gp_practice_df_ldn_2 = gp_practice_df_ldn[gp_practice_df_ldn["Organisation Code"].str.contains("E85124|Y06487")==False]
gp_practice_df_ldn_3 = gp_practice_df_ldn_2.reset_index(drop = True)
##EPRACCUR data processing end
##Get Patients registered at GP practices data from NHSD
month_year_variable = datetime.now().strftime('%B-%Y').lower()
url = "https://digital.nhs.uk/data-and-information/publications/statistical/patients-registered-at-a-gp-practice/%s" %month_year_variable
response = urllib.request.urlopen(url)
soup = BeautifulSoup(response.read(), "lxml")
data = soup.select_one("a[href*='gp-reg-pat-prac-all.csv']")
if data is not None:
csv_url = data['href']
req = requests.get(csv_url)
url_content = req.content
csv_file = open('assets/data/gp_pop_data.csv', 'wb')
csv_file.write(url_content)
csv_file.close()
else:
last_month = datetime.now() - relativedelta(months=1)
last_month_year_variable = last_month.strftime('%B-%Y').lower()
url = "https://digital.nhs.uk/data-and-information/publications/statistical/patients-registered-at-a-gp-practice/%s" %last_month_year_variable
response = urllib.request.urlopen(url)
soup = BeautifulSoup(response.read(), "lxml")
data = soup.select_one("a[href*='gp-reg-pat-prac-all.csv']")
csv_url = data['href']
req = requests.get(csv_url)
url_content = req.content
csv_file = open('assets/data/gp_pop_data.csv', 'wb')
csv_file.write(url_content)
csv_file.close()
gp_pop_df = pd.read_csv('assets/data/gp_pop_data.csv')
gp_pop_df.rename(columns={'CODE': 'Organisation Code', 'NUMBER_OF_PATIENTS': 'Number of patients registered at GP practices in England'}, inplace=True)
gp_pop_df_1 = gp_pop_df.drop(columns = {'PUBLICATION', 'EXTRACT_DATE', 'TYPE', 'CCG_CODE', 'ONS_CCG_CODE', 'SEX', 'AGE', 'POSTCODE'})
gp_pop_df_1 = gp_pop_df_1[gp_pop_df_1 ["Organisation Code"].str.contains("E85124|Y06487")==False]
gp_pop_df_1 = gp_pop_df_1.reset_index(drop = True)
##Get Patients registered at GP practices data from NHSD end
##Merge EPRACCUR and patients registered at GP practices data
gp_pop_ldn = gp_practice_df_ldn_3.merge(gp_pop_df_1, on='Organisation Code', how='left')  # match patient counts by practice code rather than by row position
gp_pop_ldn.rename(columns={'Number of patients registered at GP practices in England': 'Number of patients registered at the GP practice'}, inplace=True)
gp_pop_ldn["Address"] = gp_pop_ldn[["Full Address", "Postcode"]].agg(', '.join, axis=1)
gp_pop_ldn_1 = gp_pop_ldn.drop(columns={'National Grouping', 'Full Address'})
gp_pop_ldn_1 = gp_pop_ldn_1[["Organisation Code", "Name", "Address", "Postcode", "Contact Telephone Number", "Number of patients registered at the GP practice"]]
##Merge EPRACCUR and patients registered at GP practices data end
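# After this merge, gp_pop_ldn_1 holds one row per active London GP practice with the columns
# Organisation Code, Name, Address, Postcode, Contact Telephone Number and
# Number of patients registered at the GP practice.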
##Visualization Plot 1
x0 = gp_pop_ldn_1['Number of patients registered at the GP practice']
x1 = gp_pop_df_1['Number of patients registered at GP practices in England']
count_england = gp_pop_df_1['Number of patients registered at GP practices in England'].count()
count_london = gp_pop_ldn_1['Number of patients registered at the GP practice'].count()
fig_1 = go.Figure()
fig_1.add_trace(go.Box(x=x0,
boxmean=True,
boxpoints= 'all',
jitter=0.3,
name="London",
marker_color ="#0072CE",
whiskerwidth=0.5,
marker_size=3,
line_width=2))
fig_1.add_trace(go.Box(x=x1,
boxmean=True,
boxpoints= 'all',
jitter=0.3,
name="England",
marker_color = "#003087",
whiskerwidth=0.5,
marker_size=3,
line_width=2))
fig_1.update_layout(
{"plot_bgcolor": "rgba(0, 0, 0, 0)", "paper_bgcolor": "rgba(0, 0, 0, 0)"},
font = dict(family = "Arial", size = 16),
autosize=True,
margin=dict(l=75, r=50, b=160, t=30, pad=4, autoexpand=True), hoverlabel=dict(
font_size=12,
font_family="Arial"
), xaxis=dict(title='Number of patients registered at individual GP practices', zeroline=False))
fig_1.add_annotation(dict(font=dict(family = "Arial",size=15),
x=0.33,
y=-0.40,
showarrow=False,
text="Number of GP practices in England: %s" %count_england,
textangle=0,
xanchor='right',
xref="paper",
yref="paper"))
fig_1.add_annotation(dict(font=dict(family = "Arial",size=15),
x=0.323,
y=-0.46,
showarrow=False,
text="Number of GP practices in London: %s" %count_london,
textangle=0,
xanchor='right',
xref="paper",
yref="paper"))
##Visualization Plot 1 end
## Write out to file (.html) Plot 1
config = {"displayModeBar": False, "displaylogo": False}
plotly_obj = plotly.offline.plot(
fig_1, include_plotlyjs=False, output_type="div", config=config
)
with open("_includes/plotly_obj.html", "w") as file:
file.write(plotly_obj)
## Write out to file (.html) Plot 1 end
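# The chart above is written as a bare <div> (include_plotlyjs=False), so the page that embeds it
# must load plotly.js itself. An illustrative Jekyll include (the layout details are an assumption,
# not part of this script):
#   <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
#   {% include plotly_obj.html %}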
#Merge new GP practice data with data from the previous timepoint to avoid unnecessary Nominatim API requests
file_name = 'assets/data/gp_pop_ldn_mapped.csv'
old_data = pd.read_csv(file_name, index_col=0)
gp_pop_ldn_1 = gp_pop_ldn_1.merge(old_data[['Organisation Code','loc', 'Point', 'Latitude', 'Longitude', 'Altitude']],on='Organisation Code', how = 'left')
gp_pop_ldn_1.rename(columns={'loc_x': 'loc', 'Point_x': 'Point', 'Latitude_x': 'Latitude', 'Longitude_x': 'Longitude', 'Altitude_x': 'Altitude' }, inplace=True)
#Merge new GP practice data with data from the previous timepoint to avoid unnecessary Nominatim API requests end
##Get GP practice coordinates using geopy if new GP practices have been added to EPRACCUR
geolocator = Nominatim(user_agent="open_access_nhs")
geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
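# Note: Nominatim's usage policy allows roughly one request per second, hence the RateLimiter
# wrapper above; only practices without previously stored coordinates are geocoded below.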
if gp_pop_ldn_1['loc'].count() != gp_pop_ldn_1['Organisation Code'].count():
missing_data = pd.isnull(gp_pop_ldn_1["loc"])
    missing_data_df = gp_pop_ldn_1[missing_data].copy()
missing_data_df["loc"] = missing_data_df["Postcode"].apply(geolocator.geocode)
missing_data_df["Point"]= missing_data_df["loc"].apply(lambda loc: tuple(loc.point) if loc else None)
missing_data_df[['Latitude', 'Longitude', 'Altitude']] = pd.DataFrame(missing_data_df['Point'].to_list(), index=missing_data_df.index)
gp_pop_ldn_1 = gp_pop_ldn_1.dropna()
gp_pop_ldn_1 = pd.concat([gp_pop_ldn_1, missing_data_df], ignore_index=True)
gp_pop_ldn_1.to_csv(file_name)
##Get GP practice coordinates using geopy if new GP practices have been added to EPRACCUR end
##Visualization Plot 2
gp_prac_pop_df_1 = pd.read_csv(file_name, index_col=0)
gp_prac_pop_df_1['GP Patient Number Quintile'] = pd.qcut(gp_prac_pop_df_1['Number of patients registered at the GP practice'], 5, labels=False)
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
    tm.assert_index_equal(agged.index, df.columns)
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
# from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
import copy
path = '../the-movies-dataset/'
md = pd.read_csv(path + 'final_metadata.csv')
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
        cat = Categorical(["a", "b", "c", "a"])
import os
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
def get_result(l_type="mae", ph=6, ind=0, path="ohio_results", standard=True):
path = f"{path}/ph_{ph}_{l_type}"
# pids = [552, 544, 567, 584, 596, 559,
# 563, 570, 588, 575, 591, 540]
pids = [
540,
544,
552,
567,
584,
596,
]
maes = []
rmses = []
for pid in pids:
arr = np.loadtxt(os.path.join(path, str(pid) + ".txt"))
mae = np.mean(np.abs(arr[:, 1:8:2] - arr[:, 0:8:2]), axis=0)
rmse = np.sqrt(np.mean(np.power(arr[:, 1:8:2] - arr[:, 0:8:2], 2), axis=0))
maes.append(mae)
rmses.append(rmse)
maes = np.array(maes)
rmses = np.array(rmses)
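    # The factors below rescale errors from model scale back to blood-glucose units:
    # 60.565 is assumed to be the standard deviation used to standardise the data,
    # and 100 the factor used when values were only normalised.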
if standard:
coeff = 60.565
else:
coeff = 100
best_maes = coeff * maes[:, ind]
best_rmses = coeff * rmses[:, ind]
df = pd.DataFrame(
{"PID": pids, f"{ph*5}min MAE": best_maes, f"{ph*5}min RMSE": best_rmses}
)
return df
def get_pbp_result(l_type="mse", ph=6, ind=2, path="ohio_results", standard=True):
path = f"{path}/ph_{ph}_{l_type}"
# pids = [552, 544, 567, 584, 596, 559,
# 563, 570, 588, 575, 591, 540]
pids = [
540,
544,
552,
567,
584,
596,
]
data = []
for pid in pids:
arr = np.loadtxt(os.path.join(path, str(pid) + ".txt"))
data.append(arr)
data = np.concatenate(data, axis=0)
mae = np.mean(np.abs(data[:, 1:8:2] - data[:, 0:8:2]), axis=0)
rmse = np.sqrt(np.mean(np.power(data[:, 1:8:2] - data[:, 0:8:2], 2), axis=0))
if standard:
coeff = 60.565
else:
coeff = 100
return coeff * mae[ind], coeff * rmse[ind]
def compare_result(l_type):
path = "../ohio_results/challenge.csv"
df = pd.read_csv(path)
mae1, rmse1 = get_pbp_result(l_type, 6)
mae2, rmse2 = get_pbp_result(l_type, 12)
df = df.append(
{
"Paper ID": "ours",
"30min_MAE": mae1,
"60min_MAE": mae2,
"30min_RMSE": rmse1,
"60min_RMSE": rmse2,
},
ignore_index=True,
)
df["overall"] = (
df["30min_RMSE"] + df["30min_MAE"] + df["60min_RMSE"] + df["60min_MAE"]
)
df["30 min"] = df["30min_RMSE"] + df["30min_MAE"]
df["60 min"] = df["60min_RMSE"] + df["60min_MAE"]
df["MAE"] = df["60min_MAE"] + df["30min_MAE"]
df["RMSE"] = df["60min_RMSE"] + df["30min_RMSE"]
# print(df)
for col in list(df.columns):
if col == "Paper ID":
continue
new_df = df.sort_values(col, ignore_index=True)
if col == "MAE":
print(new_df)
print(col, new_df.index[new_df["Paper ID"] == "ours"])
def compare_only_bg_result(
l_type="mae", transfer=2, path="../ohio_results", standard=True
):
res_30 = get_result(l_type, 6, transfer, path, standard)
res_60 = get_result(l_type, 12, transfer, path, standard)
res = pd.merge(res_30, res_60, how="left", on="PID")
path = "../ohio_results/bg_ohio.xlsx"
peers = ["khadem", "bevan", "joedicke", "ma"]
result = dict()
result["metric"] = ["30min MAE", "30min RMSE", "60min MAE", "60min RMSE"]
result["ours"] = res.mean().to_numpy()[1:]
for p in peers:
        df = pd.read_excel(path, sheet_name=p)
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 14:27:27 2019
@author: Reuben
Variables help resultbox know how to interpret and display data. Sometimes,
variables have different components, like x, y, and z coordinates. They
often have units, such as meters.
The idea is that we define a variable just once. Whenever we add in some
data for that variable to a Box, we also pass in that variable. Then, we
can let resultbox take care of the rest.
"""
import pandas as pd
from difflib import SequenceMatcher
from . import utils
def _expand_single(key, val, store, specified=None):
''' Expand a value into its components
Args:
key (Variable): The variable key
val: The data for that variable
store (Store): The variable store
Returns:
dict: A dictionary of keys and values. If the variables has components,
the dictionary contains the component keys and component values.
'''
if isinstance(val, dict):
r = expand(val, store)
return {key: r}
else:
if key in store:
if specified is not None and key not in specified:
                return {key: val}
if store[key].subkeys is not None:
subkeys = store[key].subkeys
if len(val) == len(subkeys):
out = {}
for subkey, v in zip(subkeys, val):
out[subkey] = v
return out
return {key: val}
def expand(source, store, specified=None):
''' Expand variable components within a list or dictionary recursively
Args:
source (list or dict): The source list (of dictionaries) or dictionary.
The keys must exist in the store.
store (Store): The corresponding Store instance.
Returns:
list or dict: The expanded list or dictionary.
'''
if isinstance(source, list):
out = []
for val in source:
r = expand(val, store)
out.append(r)
elif isinstance(source, dict):
out = {}
for key, val in source.items():
out.update( _expand_single(key, val, store, specified))
return out
class Store(dict):
''' A store is a container for Variables '''
def __init__(self, name=None, unique=True):
self.name = name
self._id_dct = {}
self._unique = unique
def new(self, name, doc=None, unit=None, components=None, sep=' - ',
category=None, tags=None, safe=True, identifier=None):
''' Create a new variable
Args:
name (str): The variable name
doc (str): A documentation string. Defaults to None.
unit (str): The units of the variable (usually abbreviated).
Defaults to None.
components (list[str]): A list of names for each
component. Defaults to None.
sep (str): The separator between the name and any component names.
category (str): An optional category
tags (list[str]): Optional tags
safe (bool): Optional. If true, do not allow duplicates. Defaults
to True.
identifier (str): [Optional] Identifier for the variable.
Returns:
Variable: The new variable
Note:
The 'add' method is a copy of this method.
'''
new = Variable(name, doc, unit, components=components, sep=sep,
category=category, tags=tags, identifier=identifier)
if self._unique:
if new.key in self and safe:
raise KeyError('Key "' + str(name) + '" already exists. Names '
+ 'must be unique.')
elif new.key in self and not safe:
return self[new.key]
self[new.key] = new
if identifier is not None:
self._id_dct[identifier] = new.key
return new
def id_starts_with(self, s):
""" Returns a list of variables with identifies matching a suffix
Args:
s (str): The string at the start of the identifiers.
Returns:
list: List of matching variables. If no variables match, the
list will be empty.
"""
d = self._id_dct
return [self[v] for k, v in d.items() if k.startswith(s)]
def nearest(self, key):
        ''' Return the variable that best matches the input string
Args:
key (str): The input string
Returns:
Variable: The variable with the key that best matches the input
'''
keys = list(self.keys())
ratios = [SequenceMatcher(None, key, k).ratio() for k in keys]
return self[keys[ratios.index(max(ratios))]]
def suffixed(self, variable, suffix):
''' Create or return a suffixed variable using an existing one
Args:
variable (Variable): A variable
suffix (str): The suffix to append to the name
Returns:
Variable: Creates a new one if needed, or returns existing.
'''
new_name = variable.name + suffix
key = Variable._append_unit(new_name, variable.unit)
if key in self:
return self[key]
else:
kwargs = variable.to_dict()
kwargs['name'] = new_name
return self.new(**kwargs)
def add_csv(self, fname, **kwargs):
usecols = ['identifier', 'name', 'doc', 'unit', 'components',
'sep', 'category', 'tags']
df = pd.read_csv(fname, usecols=usecols, **kwargs)
        records = df.to_dict(orient='records')
for dct in records:
for k in dct.keys():
                if pd.isna(dct[k]):
                    dct[k] = None  # assumed handling: convert NaN from empty CSV cells to None
from __future__ import absolute_import, division, print_function
# from inspect import currentframe
import logging
import os
import random
import json
import click
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import (CONFIG_NAME, # WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification)
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from qurator.sbb_ner.models.tokenization import BertTokenizer
from tqdm import tqdm, trange
from ..embeddings.base import load_embeddings
from ..ground_truth.data_processor import WikipediaNEDProcessor
# from sklearn.model_selection import GroupKFold
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def model_train(bert_model, max_seq_length, do_lower_case,
num_train_epochs, train_batch_size, gradient_accumulation_steps,
learning_rate, weight_decay, loss_scale, warmup_proportion,
processor, device, n_gpu, fp16, cache_dir, local_rank,
dry_run, no_cuda, output_dir=None, model_file=None):
if gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
gradient_accumulation_steps))
train_batch_size = train_batch_size // gradient_accumulation_steps
train_dataloader = processor.get_train_examples(train_batch_size, local_rank)
# Batch sampler divides by batch_size!
num_train_optimization_steps = int(len(train_dataloader) * num_train_epochs / gradient_accumulation_steps)
if local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare model
cache_dir = cache_dir if cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE),
'distributed_{}'.format(local_rank))
model = BertForSequenceClassification.from_pretrained(bert_model, cache_dir=cache_dir,
num_labels=processor.num_labels())
if fp16:
model.half()
model.to(device)
if local_rank != -1:
try:
# noinspection PyPep8Naming
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=warmup_proportion, t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters, lr=learning_rate, warmup=warmup_proportion,
t_total=num_train_optimization_steps)
warmup_linear = None
global_step = 0
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataloader))
logger.info(" Batch size = %d", train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
logger.info(" Num epochs = %d", num_train_epochs)
logger.info(" Target learning rate = %f", learning_rate)
model_config = {"bert_model": bert_model, "do_lower": do_lower_case, "max_seq_length": max_seq_length}
def save_model(lh):
if output_dir is None:
return
if model_file is None:
output_model_file = os.path.join(output_dir, "pytorch_model_ep{}.bin".format(ep))
else:
output_model_file = os.path.join(output_dir, model_file)
# Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
json.dump(model_config, open(os.path.join(output_dir, "model_config.json"), "w"))
lh = pd.DataFrame(lh, columns=['global_step', 'loss'])
loss_history_file = os.path.join(output_dir, "loss_ep{}.pkl".format(ep))
lh.to_pickle(loss_history_file)
def load_model(epoch):
if output_dir is None:
return False
if model_file is None:
output_model_file = os.path.join(output_dir, "pytorch_model_ep{}.bin".format(epoch))
else:
output_model_file = os.path.join(output_dir, model_file)
if not os.path.exists(output_model_file):
return False
logger.info("Loading epoch {} from disk...".format(epoch))
model.load_state_dict(torch.load(output_model_file,
map_location=lambda storage, loc: storage if no_cuda else None))
return True
model.train()
for ep in trange(1, int(num_train_epochs) + 1, desc="Epoch"):
if dry_run and ep > 1:
logger.info("Dry run. Stop.")
break
if model_file is None and load_model(ep):
global_step += len(train_dataloader) // gradient_accumulation_steps
continue
loss_history = list()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
with tqdm(total=len(train_dataloader), desc=f"Epoch {ep}") as pbar:
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, labels = batch
loss = model(input_ids, segment_ids, input_mask, labels)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
if fp16:
optimizer.backward(loss)
else:
loss.backward()
loss_history.append((global_step, loss.item()))
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
pbar.update(1)
mean_loss = tr_loss * gradient_accumulation_steps / nb_tr_steps
pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
if dry_run and len(loss_history) > 2:
logger.info("Dry run. Stop.")
break
if (step + 1) % gradient_accumulation_steps == 0:
if fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = learning_rate * warmup_linear.get_lr(global_step, warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
save_model(loss_history)
return model, model_config
def model_eval(batch_size, processor, device, num_train_epochs=1, output_dir=None, model=None,
local_rank=-1, no_cuda=False, dry_run=False, model_file=None):
output_eval_file = None
if output_dir is not None:
output_eval_file = os.path.join(output_dir, processor.get_evaluation_file())
logger.info('Write evaluation results to: {}'.format(output_eval_file))
dataloader = processor.get_dev_examples(batch_size, local_rank)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(dataloader))
logger.info(" Batch size = %d", batch_size)
if output_dir is not None:
output_config_file = os.path.join(output_dir, CONFIG_NAME)
if not os.path.exists(output_config_file):
raise RuntimeError("Cannot find model configuration file {}.".format(output_config_file))
config = BertConfig(output_config_file)
else:
raise RuntimeError("Cannot find model configuration file. Output directory is missing.")
model = None
def load_model(epoch):
nonlocal model
if output_dir is None:
return False
if model_file is None:
output_model_file = os.path.join(output_dir, "pytorch_model_ep{}.bin".format(epoch))
else:
output_model_file = os.path.join(output_dir, model_file)
if not os.path.exists(output_model_file):
logger.info("Stopping at epoch {} since model file is missing ({}).".format(ep, output_model_file))
return False
logger.info("Loading epoch {} from disk...".format(epoch))
model = BertForSequenceClassification(config, num_labels=processor.num_labels())
# noinspection PyUnresolvedReferences
model.load_state_dict(torch.load(output_model_file,
map_location=lambda storage, loc: storage if no_cuda else None))
# noinspection PyUnresolvedReferences
model.to(device)
return True
results = []
for ep in trange(1, int(num_train_epochs) + 1, desc="Epoch"):
if dry_run and ep > 1:
logger.info("Dry run. Stop.")
break
if not load_model(ep):
break
if model is None:
raise ValueError('Model required for evaluation.')
# noinspection PyUnresolvedReferences
model.eval()
results.append(model_predict_compare(dataloader, device, model))
if output_eval_file is not None:
pd.concat(results).to_pickle(output_eval_file)
def model_predict_compare(dataloader, device, model, disable_output=False):
decision_values = list()
for input_ids, input_mask, segment_ids, labels in tqdm(dataloader, desc="Evaluating", total=len(dataloader),
disable=disable_output):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
labels = labels.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask)
tmp = pd.DataFrame(F.softmax(logits, dim=1).cpu().numpy())
tmp['labels'] = labels.cpu().numpy()
decision_values.append(tmp)
return pd.concat(decision_values).reset_index(drop=True)
def get_device(local_rank=-1, no_cuda=False):
if local_rank == -1 or no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
return device, n_gpu
@click.command()
@click.argument("bert-model", type=str, required=True, nargs=1)
@click.argument("output-dir", type=str, required=True, nargs=1)
@click.option("--model-file", type=click.Path(), default=None, help="Continue to train on this model file.")
@click.option("--train-set-file", type=click.Path(exists=True), default=None, help="See ned-train-test-split.")
@click.option("--dev-set-file", type=click.Path(exists=True), default=None, help="See ned-train-test-split.")
@click.option("--test-set-file", type=click.Path(exists=True), default=None, help="See ned-train-test-split.")
@click.option("--train-size", default=0, type=int, help="")
@click.option("--dev-size", default=0, type=int, help="")
@click.option("--train-size", default=0, type=int, help="")
@click.option("--cache-dir", type=click.Path(), default=None,
help="Where do you want to store the pre-trained models downloaded from s3")
@click.option("--max-seq-length", default=128, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n than this will be padded.")
@click.option("--do-lower-case", is_flag=True, help="Set this flag if you are using an uncased model.", default=False)
@click.option("--train-batch-size", default=32, type=int, help="Total batch size for training.")
@click.option("--eval-batch-size", default=8, type=int, help="Total batch size for eval.")
@click.option("--learning-rate", default=3e-5, type=float, help="The initial learning rate for Adam.")
@click.option("--weight-decay", default=0.01, type=float, help="Weight decay for Adam.")
@click.option("--num-train-epochs", default=3.0, type=float, help="Total number of training epochs to perform/evaluate.")
@click.option("--warmup-proportion", default=0.1, type=float,
help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
@click.option("--no-cuda", is_flag=True, help="Whether not to use CUDA when available", default=False)
@click.option("--dry-run", is_flag=True, default=False, help="Test mode.")
@click.option("--local-rank", type=int, default=-1, help="local_rank for distributed training on gpus")
@click.option('--seed', type=int, default=42, help="random seed for initialization")
@click.option('--gradient-accumulation-steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass. default: 1")
@click.option('--fp16', is_flag=True, default=False, help="Whether to use 16-bit float precision instead of 32-bit")
@click.option('--loss-scale', type=float, default=0.0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n "
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
@click.option("--ned-sql-file", type=click.Path(exists=True), default=None, required=False, help="See ned-sentence-data")
@click.option('--embedding-type', type=click.Choice(['fasttext']), default='fasttext')
@click.option('--embedding-model', type=click.Path(exists=True), default=None)
@click.option('--n-trees', type=int, default=100)
@click.option('--distance-measure', type=click.Choice(['angular', 'euclidean']), default='angular')
@click.option('--entity-index-path', type=click.Path(exists=True), default=None)
@click.option('--entities-file', type=click.Path(exists=True), default=None)
def main(bert_model, output_dir,
train_set_file, dev_set_file, test_set_file, cache_dir, max_seq_length,
train_size=0, dev_size=0, test_size=0,
do_lower_case=False, train_batch_size=32, eval_batch_size=8, learning_rate=3e-5,
weight_decay=0.01, num_train_epochs=3, warmup_proportion=0.1, no_cuda=False, dry_run=False, local_rank=-1,
seed=42, gradient_accumulation_steps=1, fp16=False, loss_scale=0.0,
ned_sql_file=None, search_k=50, max_dist=0.25, embedding_type='fasttext', embedding_model=None, n_trees=100,
distance_measure='angular', entity_index_path=None, entities_file=None, model_file=None):
"""
bert_model: Bert pre-trained model selected in the list:\n
bert-base-uncased, bert-large-uncased, bert-base-cased,\n
bert-large-cased, bert-base-multilingual-uncased,\n
bert-base-multilingual-cased, bert-base-chinese.\n
output_dir: The output directory where the model predictions
and checkpoints will be written.\n
"""
device, n_gpu = get_device(local_rank, no_cuda)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(local_rank != -1), fp16))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if not train_size > 0 and not dev_size > 0:
raise ValueError("At least one of train_size or dev_size must be > 0.")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if ned_sql_file is not None:
if entity_index_path is None:
raise RuntimeError("entity-index-path required!")
if entities_file is None:
raise RuntimeError("entities-file required!")
embs = load_embeddings(embedding_type, model_path=embedding_model)
embeddings = {'PER': embs, 'LOC': embs, 'ORG': embs}
train_subset = pd.read_pickle(train_set_file)
dev_subset = pd.read_pickle(dev_set_file)
test_subset = | pd.read_pickle(test_set_file) | pandas.read_pickle |
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from multiprocessing.dummy import Pool as ThreadPool
import os
import time
import pandas as pd
import nltk
import numpy as np
import re
import spacy
from sklearn.feature_extraction.text import CountVectorizer
import progressbar as bar
import extractUnique as xq
import tristream_processor as stream
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # 248
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, f1_score, precision_score, recall_score
#_start = time.time()
testB = pd.read_csv("CSV/Restaurants_Test_Data_phaseB.csv")
trainB = pd.read_csv("CSV/Restaurants_Train_v2.csv")
trainB_1 = trainB.iloc[:, [0, 7, 5]]
testB_1 = testB.iloc[:, [0, 5, 4]]
del testB
fullB = pd.concat([trainB_1, testB_1], axis=0, ignore_index=True)
dataset = fullB # MAJOR DATA-SET
# --------------------- FUNCTIONS --------------------------
def check_dep_parse(token_dep):
dep_str = token_dep
# if dep_str.startswith('nsub'):
# pass
# elif dep_str.startswith('amod'):
# pass
# elif dep_str.startswith('rcmod'):
# pass
# elif dep_str.startswith('dobj'):
# pass
# elif dep_str.startswith('neg'):
# pass
if dep_str.startswith('det'):
pass
else:
return False
return True
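# Illustrative behaviour of check_dep_parse (note added, not in the original
# code): with only the 'det' branch active, determiner relations are kept and
# every other dependency label is rejected, e.g.
#   check_dep_parse('det')   -> True
#   check_dep_parse('nsubj') -> False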
def streamers(full_dataset):
dataset = full_dataset
# --------------------- STREAM INITIALIZER ----------------------------
PoS_Tag_sent = list()
S1_corpus = [] # CORPUS (For Collecting Lemmas)
corpora = '' # CORPORA (For Collecting Corpora of single sentence)
S2_super_corpus = [] # CORPUS (For Collecting Bigrams sentence wise)
# --------------------- SPACY SPECS ------------------------
nlp_en = spacy.load('en_core_web_sm')
plot_nlp = 0 # For Plotting of Dependency chart
S3_dep_corpus = [] # CORPUS (For Collecting Dependency Relations)
# ---------------------------------------------------------- STREAM 1 - LEMMATIZATION
stream1 = stream.lemmatize(dataset)
# ----------------------------------------------------------- STREAM 2 - BIGRAMS
stream2 = stream.bigram(dataset)
# ----------------------------------------------------------- STREAM 3 - DEPENDENCY FEATURES (spaCy)
stream3 = stream.dep_rel(dataset)
stream1.to_csv('Wave2/stream1.csv', index=False)
stream2.to_csv('Wave2/stream2.csv', index=False)
stream3.to_csv('Wave2/stream3.csv', index=False)
del S1_corpus, S2_super_corpus, S3_dep_corpus
return stream1, stream2, stream3
def sheet_generator(s1, s2, s3):
stream1 = s1
stream2 = s2
stream3 = s3
df = pd.concat([stream1, stream2, stream3], axis=1)
df = df.rename(columns={0: 'lemmas', 1: 'bigrams', 2: 'depenrel'})
df.to_csv('Wave2/FeatureSet.csv', index=False)
df = pd.read_csv('Wave2/FeatureSet.csv', sep=',')
del df
# try:
# pool = ThreadPool(2)
# pool.map(os.system('firefox localhost:5000 &'), spacy.displacy.serve(plot_nlp, style='dep')).join()
# exit(0)
# except OSError:
# print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
# except TypeError:
# print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
# Get Unique Features from Bi-grams, Dependency Rel
whole_df = pd.concat([dataset.iloc[0:, 0], stream1, stream2, stream3, dataset.iloc[0:, 2]], axis=1)
whole_df = whole_df.rename(columns={'text': 'reviews', 0: 'lemmas', 1: 'bigrams', 2: 'depenrel',
'aspectCategories/aspectCategory/0/_category': 'aspectCategory'})
whole_df.to_csv('Wave2/WholeSet.csv', index=False)
whole_df = pd.read_csv('Wave2/WholeSet.csv', sep=',')
u_feat = list()
try:
u_feat = xq.unique(whole_df=whole_df, bigram_col=2, dep_rel_col=3)
print("Unique Features Extracted")
except KeyboardInterrupt:
print("[STAGE 3] Manual Interrupt to Unique Features")
exit(0)
except Exception as e:
print('[STAGE 3] Improper Termination due to:', e)
exit(0)
# DF with Review, Lemmas, U_feat, Aspect Cat
Feature_df = whole_df[['reviews', 'lemmas']][0:]
Feature_df = pd.concat([Feature_df, pd.Series(u_feat), whole_df.iloc[0:, -1]], axis=1)
Feature_df = Feature_df.rename(columns={0: 'ufeat'})
Feature_df.to_csv('Wave2/Feature.csv', index=False)
del whole_df,
# Aspect Cat, Lemmas + U_feat (from All sentences)
c_list = list()
try:
Feature_df = Feature_df.dropna()
c_list = xq.combiner(Feature_df=Feature_df, lemma_col=1, uniqueFeat_col=2, use_ast=True)
except KeyboardInterrupt:
print("[STAGE 4] Manual Interrupt to Combiner")
exit(0)
except Exception as e:
print("[STAGE 4] Improper Termination due to:", e)
exit(0)
return Feature_df, c_list
def corrector(combined_features_list):
c_list = combined_features_list
ngram_list = list()
try:
st = time.time()
ngram_list = xq.get_correct_spell(word_list=c_list, split_by=';')
#syn_list = stream.syns_of_ngrams(ngram_list)
#ngram_list+=syn_list
et = time.time()
print('Time elapsed %.3f' % float(((et-st)/60)/60))
except ValueError:
print("[STAGE 5] Spell Checker | Interrupted")
except TypeError:
print("[STAGE 5] Spell Checker | Multi-threading issue")
except AttributeError:
print("[STAGE 5] Spell Checker | Attrition")
except KeyboardInterrupt:
print("[STAGE 5] Spell Checker | Forced Drop")
pd.Series(ngram_list).to_csv('Wave2/ngram_list.csv', index=False)
return ngram_list
# Creating Bag of Words Model
def creating_bow(corrected_list, features_dataframe, max_features=33433):
ngram_list = list(corrected_list)
Feature_df = features_dataframe
max_ft = max_features
cv = CountVectorizer(max_features=max_ft, ngram_range=(1, 2))
# key_Book = pd.DataFrame(itemDict, index=range(itemDict.__len__()))
# key_Book.to_csv('key_Book.csv', index=True, sep=',')
# ============================== Preparing Train set =============================
# ML with Bag of Words to Aspect Categories
X = cv.fit_transform(ngram_list).toarray()
y = Feature_df['aspectCategory']
del ngram_list
return X, y, cv.vocabulary_
def split_train_test(X, y):
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
return X_train, X_test, y_train, y_test
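# Minimal end-to-end sketch of the two helpers above (illustrative only; it
# assumes ngram_list and Feature_df were produced by corrector/sheet_generator):
#   X, y, vocab = creating_bow(ngram_list, Feature_df, max_features=1000)
#   X_train, X_test, y_train, y_test = split_train_test(X, y)
#   the_machine(X_train, X_test, y_train, y_test)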
def evaluator(prf, li2, total):
li = ['Precision', 'Recall\t', 'F1 Measure']
print("EVALUATION RESULTS".center(60,'_'))
cmx = [[73.6, 81.3, 90.9, 89.9, 92.2, 87.5],
[66.1, 70.5, 83.3, 95.2, 89.0, 80.3],
[69.6, 75.5, 86.9, 92.4, 90.5, 83.5]]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
y = cmx[i]
print('%s \t %r \t\t %r \t %r \t %r \t %r \t %r' % (li[i], x[0] >= y[0], x[1] >= y[1], x[2] >= y[2],
x[3] >= y[3], x[4] >= y[4], total[i] >= y[5]))
def prf_to_csv(prf, fileName):
PRF = np.array(prf)
PRF_DF = pd.DataFrame(PRF, index=['Precision', 'Recall', 'F1 Measure', 'Support'])
PRF_DF = PRF_DF.iloc[:,:] * 100
PRF_DF.to_csv('Results/%s'%fileName)
# ----------------- PREPARING THE MACHINE --------------------------
def the_machine(X_train, X_test, y_train, y_test):
print("RANDOM FOREST CLASSIFIER RESULTS:")
rf_Classifier = RandomForestClassifier(n_estimators=50, n_jobs=4)
rf_Classifier.fit(X_train, y_train)
y_pred = rf_Classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
prf = precision_recall_fscore_support(y_test, y_pred)
li2 = list(rf_Classifier.classes_)
li2.append('TOTAL')
li = ['Precision', 'Recall\t', 'F1 Measure']
method = 'weighted'
total_f1 = f1_score(y_test, y_pred, average=method) * 100
total_pr = precision_score(y_test, y_pred, average=method) * 100
total_re = recall_score(y_test, y_pred, average=method) * 100
total = [total_pr, total_re, total_f1]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
print(
'%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
evaluator(prf, li2, total)
prf_to_csv(prf, 'RandomForest_LBD.csv')
print("SVM RESULTS:")
from sklearn.svm import LinearSVC
# classifier = SVC(kernel='sigmoid', degree=3)
linsvc_classifier = LinearSVC(multi_class='crammer_singer', C=1)
linsvc_classifier.fit(X_train, y_train)
y_pred = linsvc_classifier.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred)
print(cm1)
prf = precision_recall_fscore_support(y_test, y_pred)
li2 = list(linsvc_classifier.classes_)
li2.append('TOTAL')
li = ['Precision', 'Recall\t', 'F1 Measure']
total_f1 = f1_score(y_test, y_pred, average=method) * 100
total_pr = precision_score(y_test, y_pred, average=method) * 100
total_re = recall_score(y_test, y_pred, average=method) * 100
total = [total_pr, total_re, total_f1]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
print('%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
evaluator(prf, li2, total)
prf_to_csv(prf, 'LinearSVC_LBD.csv')
print("MULTINOMIAL NB RESULTS:")
from sklearn.naive_bayes import MultinomialNB
# classifier = SVC(kernel='sigmoid', degree=3)
multi_nb_classifier = MultinomialNB()
multi_nb_classifier.fit(X_train, y_train)
y_pred = multi_nb_classifier.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred)
print(cm1)
prf = precision_recall_fscore_support(y_test, y_pred)
li2 = list(multi_nb_classifier.classes_)
li2.append('TOTAL')
li = ['Precision', 'Recall\t', 'F1 Measure']
total_f1 = f1_score(y_test, y_pred, average=method) * 100
total_pr = precision_score(y_test, y_pred, average=method) * 100
total_re = recall_score(y_test, y_pred, average=method) * 100
total = [total_pr, total_re, total_f1]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
print(
'%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4], total[i]))
evaluator(prf, li2, total)
prf_to_csv(prf, 'MultinomialNB_LBD.csv')
print("VOTING CLASSIFIER RESULTS:")
# BEST CLASSIFIERS
RFC_C1 = RandomForestClassifier(n_estimators=25, n_jobs=4)
LSVC_C2 = LinearSVC(multi_class='crammer_singer', C=1)
MNB_C3 = MultinomialNB()
from sklearn.ensemble import VotingClassifier
# classifier = GaussianNB()
# classifier = MultinomialNB(fit_prior=False)
classifier = VotingClassifier(estimators=[('lr', RFC_C1), ('rf', LSVC_C2),
('gnb', MNB_C3)], voting='hard', n_jobs=4)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
cm1 = confusion_matrix(y_test, y_pred)
print(cm1)
prf = precision_recall_fscore_support(y_test, y_pred)
li2 = list(classifier.classes_)
li2.append('TOTAL')
li = ['Precision', 'Recall\t', 'F1 Measure']
total_f1 = f1_score(y_test, y_pred, average='macro') * 100
total_pr = precision_score(y_test, y_pred, average='micro') * 100
total_re = recall_score(y_test, y_pred, average='micro') * 100
total = [total_pr, total_re, total_f1]
print('\t\t %s %.8s \t %s \t %s \t %s %s' % (li2[0], li2[1], li2[2], li2[3], li2[4], li2[5]))
for i in range(len(prf) - 1):
x = prf[i] * 100.0
print('%s \t %.2f \t\t %.2f \t %.2f \t %.2f \t %.2f \t %.1f' % (li[i], x[0], x[1], x[2], x[3], x[4],
total[i]))
evaluator(prf, li2, total)
prf_to_csv(prf, 'VotingClassifier_LBD.csv')
def executor():
'''
streamers: creates all 3 streams (lemmas, bi-grams and dependency relations) for the training set
sheet_generator: combines the features from the 3 streams into one unique feature set for the training set
corrector: spell-corrects the n-grams and produces a list of uni-grams and bi-grams for the training set
creating_bow: creates the Bag of Words model from the corrected n-grams of the training set
streamers_test: creates all 3 streams (lemmas, bi-grams and dependency relations) for the test set
sheet_generator_test: combines the features from the 3 streams into one uniquely
corrected feature set for the test set
creating_bow_test: creates the Bag of Words model from the corrected n-grams of the test set
ARGUMENTS
train_ds: Dataset
:return:
'''
all_streams = list()
X, y = 0, 0
max_feat = 1000
def take_feat():
max_feat = int(input('Enter No. of features (MIN:MAX) to use in Machine\n (1000:33433) Input:'))
return max_feat
while True:
global fullB, testB_1
choice = int(input("""\t\t\t\tMENU\n
-------- Data Pre-processing ---------(1 Hr 20 Mins)
1. Perform Lemmatization, Bi-grams formation \n\t\t& Dependency Relations\n
2. Combine into Unique Features (4Secs)\n
3. Create Bag of Words Model (2Secs)\n
-------- Train Test Split ----------(50 Mins)
4. Perform Pre-processing & Processing on Test Set
-------- MACHINE LEARNING ------
5. Call Machine
6. Exit
\t Choice:"""))
if choice == 1:
arr = os.listdir('Wave2')
exists = any(item.startswith('stream') for item in arr)
if not exists:
a, b, c = streamers(fullB)
all_streams.append(a)
all_streams.append(b)
all_streams.append(c)
else:
print('\t\t\t\t\t\tALREADY PROCESSED: GO TO STEP 2')
elif choice == 2:
arr = os.listdir('Wave2')
exists = any(item.startswith('stream') for item in arr)
if not exists:
print('\t\t\t\t\t\t[CHOICE 2] GENERATING STREAMS')
streamers(fullB)
else:
print('\t\t\t\t\t\tALREADY PROCESSED: GO TO STEP 3')
a = pd.read_csv('Wave2/stream1.csv', header=None)
b = pd.read_csv('Wave2/stream2.csv', header=None)
c = pd.read_csv('Wave2/stream3.csv', header=None)
all_streams.append(pd.Series(a[0]))
all_streams.append(pd.Series(b[0]))
all_streams.append( | pd.Series(c[0]) | pandas.Series |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
Sets the column for the string based on the year. Checks that the year
you picked is in the last file.
:param years: string, year range separated by a hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
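# Example of the year-to-column mapping (added for clarity, not original code):
# for the asbestos range "2014-2018", 2016 is the 3rd covered year, so
#   usgs_myb_year(YEARS_COVERED["asbestos"], "2016") -> "year_3"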
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
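# Example (illustrative): the third "_"-separated token of the source name is
# de-camel-cased and lower-cased, e.g. a hypothetical source "USGS_MYB_SodaAsh"
# would map to the activity name "soda ash".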
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return:
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
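# Note (added for clarity): this dict seeds every FBA row with the shared
# constants; the *_parse functions below call it for each row and then fill in
# Year, Unit, FlowName, FlowAmount and the remaining per-row fields.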
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string:
:return:
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
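# Example (illustrative): only digits are stripped, everything else is kept,
# so a footnoted row label such as "Exports:2" becomes "Exports:".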
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
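# Note (added for clarity): this url helper performs no substitutions at all;
# it simply returns the base url unchanged, wrapped in the single-element list
# shape that the caller expects per the docstring above.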
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = | pd.concat(frames) | pandas.concat |
#note that there is a key part of this program that requires command line interaction...
#if you run this on files that already exist, you risk overwriting guids....
#proceed line by line
import pandas as pd
import os, datetime
from src.download.box import LifespanBox
verbose = True
#verbose = False
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
root_cache='/data/intradb/tmp/box2nda_cache/'
cache_space = os.path.join(root_cache)
try:
os.mkdir(cache_space)
except:
print("cache already exists")
root_store = '/home/shared/HCP/hcpinternal/ccf-nda-behavioral/store/'
store_space = os.path.join(root_store, 'eprime') #this will be the place to save any snapshots on the nrg servers
try:
os.mkdir(store_space) #look for store space before creating it here
except:
print("store already exists")
#connect to Box
box = LifespanBox(cache=cache_space)
#subjects who already have guids:
haveguids=pd.read_csv('genguids/subjects_w_guids_16April2020IntraDBdownload.csv',header=0)
#merge with all redcap subject ids to find subjects who need guids
hcd=box.getredcapfields(['id'],study='hcpdchild')
hca=box.getredcapfields(['id'],study='hcpa')
hcd18=box.getredcapfields(['id'],study='hcpd18')
allsubjects=pd.concat([hcd,hca],axis=0)
#drop the withdrawns
allsubjects=allsubjects.loc[allsubjects.flagged.isnull()==True][['subject_id','id']]
all18subjects=hcd18.loc[hcd18.flagged.isnull()==True][['subject_id','id']]
#did this already....circling back to do the hcpd18 folks after moving the used psudo guids to 'used' and generating more new ones to fill holes
allwguids=pd.merge(haveguids,allsubjects,how='right',left_on='Subject',right_on='subject_id')
allwguids['pseudo_guid']=allwguids.nda_guid
#find how many need new ids
a=len(allwguids.loc[allwguids.pseudo_guid.isnull()==True])
all18wguids= | pd.merge(haveguids,all18subjects,how='right',left_on='Subject',right_on='subject_id') | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# In[48]:
import pandas as pd
import urllib
import numpy as np
import json
from tqdm.autonotebook import tqdm
#%matplotlib inline
tqdm.pandas()
import dask.dataframe as dd
from dask.multiprocessing import get
from dask.diagnostics import ProgressBar
from datetime import datetime
import matplotlib.pyplot as plt
from IPython.display import display
# In[41]:
import urllib3
# In[42]:
http = urllib3.PoolManager()
# In[43]:
from config_batch import *
# # Functions
# In[44]:
ws_hostname = "127.0.1.1"
ws_hostname = "10.1.0.45"
# ws_hostname = "192.168.1.3"
# In[45]:
def call_ws(addr_data, check_result=True, structured_osm=False): #lg = "en,fr,nl"
t = datetime.now()
params = urllib.parse.urlencode({"street": addr_data[street_field],
"housenumber": addr_data[housenbr_field],
"city": addr_data[city_field],
"postcode": addr_data[postcode_field],
"country": addr_data[country_field],
"check_result" : "yes" if check_result else "no",
"struct_osm" : "yes" if structured_osm else "no"
})
url = f"http://{ws_hostname}:5000/search/?{params}"
print(url)
try:
with urllib.request.urlopen(url) as response:
res = response.read()
res = json.loads(res)
# print(res)
res["time"] = datetime.now() - t
return res
except Exception as e:
return str(e)
# In[16]:
def call_ws_batch(addr_data, mode="geo", with_reject=False, check_result=True, structured_osm=False): #lg = "en,fr,nl"
# print(addr_data)
# print(addr_data.shape)
# print()
file_data = addr_data.rename(columns = {
street_field : "street",
housenbr_field: "housenumber",
postcode_field: "postcode",
city_field: "city",
country_field: "country",
addr_key_field : "addr_key"
}).to_csv(index=False)
r = http.request(
'POST',
f'http://{ws_hostname}:5000/batch',
fields= {
'media': ('addresses.csv', file_data),
'mode': mode,
"with_rejected" : "yes" if with_reject else "no",
"check_result" : "yes" if check_result else "no",
"struct_osm" : "yes" if structured_osm else "no"
})
try:
res = pd.DataFrame(json.loads(r.data.decode('utf-8')))
except ValueError:
print("Cannot decode result:")
print(json.loads(r.data.decode('utf-8')))
return
# display(res)
return res
# In[46]:
def expand_json(addresses):
addresses["status"]= addresses.json.apply(lambda d: "error" if "error" in d else "match" if "match" in d else "rejected")
addresses["time"] = addresses.json.apply(lambda d: d["time"])
addresses["timing"] = addresses.json.apply(lambda d: d["timing"] if "timing" in d else {})
addresses["method"]= addresses.json.apply(lambda d: d["match"][0]["method"] if len(d)>0 and "match" in d else "none")
for field in ["street", "number", "postcode", "city"]:
addresses[field]= addresses.json.apply(lambda d: d["match"][0]["addr_out_"+field] if len(d)>0 and "match" in d else "")
return
# # Calls
# ## Single address calls
# In[49]:
call_ws({street_field: "Av. Fonsny",
housenbr_field: "20",
city_field: "Saint-Gilles",
postcode_field: "1060",
country_field: "Belgium"}, check_result=True, structured_osm=False)
# In[21]:
call_ws({street_field: "",
housenbr_field: "",
city_field: "Dinant",
postcode_field: "5500",
country_field: "Belgium"}, check_result=True, structured_osm=True)
# In[11]:
call_ws({street_field: "Fechtergasse",
housenbr_field: "16/13",
city_field: "Wenen",
postcode_field: "1090",
country_field: "Oostenrijk"}, check_result=False, structured_osm=False)
# In[12]:
call_ws({street_field: "Fechtergasse 16/13 1090 Wenen",
housenbr_field: "",
city_field: "",
postcode_field: "",
country_field: "Oostenrijk"}, check_result=False, structured_osm=False)
# ## Batch calls (row by row)
# In[38]:
addresses = get_addresses("address.csv.gz")
addresses = addresses.sample(100).copy()
# ### Simple way
# In[74]:
addresses["json"] = addresses.progress_apply(call_ws, check_result=True, structured_osm=False, axis=1)
# ### Using Dask
# In[17]:
dd_addresses = dd.from_pandas(addresses, npartitions=4)
dask_task = dd_addresses.apply(call_ws, meta=('x', 'str'), axis=1)
with ProgressBar():
addresses["json"] = dask_task.compute()
# In[26]:
expand_json(addresses)
# In[27]:
addresses
# ## Batch calls (batch WS)
# ### Single block
# In[39]:
# Only geocoding
# addresses["StreetFR"] = ""
call_ws_batch(addresses, mode="geo", check_result=True, structured_osm=True)
# In[62]:
# Geocode + address
call_ws_batch(addresses, mode="short")
# In[63]:
# Geocode + address, with rejected addresses
call_ws_batch(addresses, mode="long", with_reject=True)
# ### Batch blocs
# In[21]:
def call_ws_batch_chunks(addr_data, mode="geo", with_reject=False, check_result=True, structured_osm=False, chunk_size=100):
## TODO : find a better way with dask? It seems that map_partitions does not support function returning dataframes.
chunks = np.array_split(addr_data, addr_data.shape[0]//chunk_size)
res= [call_ws_batch(chunk, mode=mode,
check_result=check_result,
structured_osm=structured_osm) for chunk in tqdm(chunks)]
df_res = pd.concat(res, sort=False)
return df_res
# In[ ]:
df_res = call_ws_batch_chunks(addresses, chunk_size=10)
df_res
# In[1]:
df_res.method.value_counts()
# ## Comparing options
# In[19]:
addresses = get_addresses("address.csv.gz")
addresses = addresses[addresses[country_field] == "Belgique"]
addresses = addresses.sample(10000).copy()
# In[22]:
results = {}
it_per_seconds=pd.DataFrame()
for check_label in ["check", "nocheck"]:
for struct_label in ["struct", "unstruct" ]:
print(check_label, struct_label)
start=datetime.now()
results[(check_label, struct_label)] = call_ws_batch_chunks(addresses,
mode="short",
check_result = check_label == "check",
structured_osm = struct_label == "struct")
it_per_seconds.loc[check_label, struct_label] = addresses.shape[0] / (datetime.now()-start).total_seconds()
print("Iterations per seconds:")
it_per_seconds
# In[23]:
print("Match rate")
pd.DataFrame({k1: {k2: results[(k1,k2)].shape[0]/addresses.shape[0] for k2 in ["struct", "unstruct"]}
for k1 in ["check","nocheck"]})
# In[24]:
print("Match rate (without nostreet)")
pd.DataFrame({k1: {k2: results[(k1,k2)].query("method!='nostreet'").shape[0]/addresses.shape[0] for k2 in ["struct", "unstruct"]}
for k1 in ["check","nocheck"]})
# In[25]:
print("Unmatched addresses")
for k1 in results:
print(k1)
nomatch=addresses[~addresses[addr_key_field].isin(results[k1]["addr_key"])]
display(nomatch)
print(nomatch[country_field].value_counts())
# In[26]:
vc_values = pd.DataFrame(columns=results.keys(), index=results.keys())
for k1 in results:
vc_values.loc[k1, k1] = results[k1].shape[0]
for k2 in results:
if k1>k2:
r1=results[k1]
r2=results[k2]
mg = r1[["addr_key", "place_id"]].merge(r2[["addr_key", "place_id"]], on="addr_key", how="outer", indicator=True)
vc = mg._merge.value_counts()
mismatches = mg[mg.place_id_x != mg.place_id_y][["addr_key"]]
mismatches = mismatches.merge(addresses.rename({addr_key_field:"addr_key"}, axis=1))
mismatches = mismatches.merge(r1[["addr_key", "addr_out_street", "addr_out_number", "extra_house_nbr", "addr_out_postcode", "addr_out_city"]], on="addr_key")
mismatches = mismatches.merge(r2[["addr_key", "addr_out_street", "addr_out_number", "extra_house_nbr", "addr_out_postcode", "addr_out_city"]], on="addr_key")
mismatches.columns = | pd.MultiIndex.from_arrays([["Input"]*6 + [f"x:{k1}"]*5 + [f"y:{k2}"]*5, mismatches.columns]) | pandas.MultiIndex.from_arrays |
#######################
# Header.R
from datetime import time
from operator import index
from os import path, times
import numpy as np
import pandas as pd
import os
import logging
from pathlib import Path
from pandas.core.reshape.merge import merge
from powergenome.util import regions_to_keep
from powergenome.us_state_abbrev import (state2abbr, abbr2state)
path_in = r"..\data\load_profiles_data\input" # fix
#read in state proportions
#how much state load should be distributed to GenXRegion
# pop = pd.read_parquet(path_in + "\GenX_State_Pop_Weight.parquet")
pop = pd.read_parquet(path_in + "\ipm_state_pop_weight_20210517.parquet")
states = pop.drop_duplicates(subset=["State"])["State"]
states_abb = list(map(state2abbr, states))
pop["State"] = list(map(state2abbr, pop["State"]))
states_eastern_abbr = ["ME","VT","NH","MA","RI","CT","NY","PA","NJ","DE","MD","DC","MI","IN","OH","KY","WV","VA","NC","SC","GA","FL"]
states_central_abbr = ["IL","MO","TN","AL","MS","WI","AR","LA","TX","OK","KS","NE","SD","ND","IA","MN"]
states_mountain_abbr = ["MT","WY","CO","NM","AZ","UT","ID"]
states_pacific_abbr = ["CA","NV","OR","WA"]
states_eastern = list(map(abbr2state, states_eastern_abbr))
states_central = list(map(abbr2state, states_central_abbr))
states_mountain = list(map(abbr2state, states_mountain_abbr))
states_pacific = list(map(abbr2state, states_pacific_abbr))
# some parameters
stated_states = ["New Jersey", "New York", "Virginia"]
# Date Jan 29, 2021
# (2) PA, NJ, VA, NY, MI all have EV and heat pump stocks from NZA DD case
# consistent with their economywide decarbonization goals.
# https://www.c2es.org/content/state-climate-policy/
# Date Feb 10, 2021
# Remove high electrification growth in PA and MI in stated policies;
# they don't have clean energy goals, so it would be confusing/inconsistent to require high electrification in these states.
# So our new "Stated Policies" definition for electrification is states
# with BOTH economywide emissions goals + 100% carbon-free electricity standards
# = NY, NJ, VA.
stated_states_abbr = list(map(state2abbr, stated_states))
#years = ["2022", "2025", "2030", "2040", "2050"]
cases = ["current_policy", "stated_policy", "deep_decarbonization"]
running_sector = ['Residential','Residential', 'Commercial', 'Commercial','Transportation','Transportation','Transportation', 'Transportation']
running_subsector = ['space heating and cooling','water heating', 'space heating and cooling', 'water heating','light-duty vehicles','medium-duty trucks','heavy-duty trucks','transit buses']
Nsubsector = len(running_subsector)
logger = logging.getLogger(__name__)
#Define function for adjusting time-difference
def addhour(x):
x += 1
x = x.replace(8761, 1)
return x
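# Illustrative wrap-around behaviour (note added; assumes x is a pandas Series
# of hour-of-year indices 1..8760): the hour after 8760 rolls back to 1, e.g.
#   addhour(pd.Series([1, 8760])) -> pd.Series([2, 1])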
def SolveThreeUnknowns(a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3):
D = a1*b2*c3 + b1*c2*a3 + c1*a2*b3 - a1*c2*b3 - b1*a2*c3 - c1*b2*a3
Dx = d1*b2*c3 + b1*c2*d3 + c1*d2*b3 - d1*c2*b3 - b1*d2*c3 - c1*b2*d3
Dy = a1*d2*c3 + d1*c2*a3 + c1*a2*d3 - a1*c2*d3 - d1*a2*c3 - c1*d2*a3
Dz = a1*b2*d3 + b1*d2*a3 + d1*a2*b3 - a1*d2*b3 - b1*a2*d3 - d1*b2*a3
Sx = Dx/D
Sy = Dy/D
Sz = Dz/D
d = {'Sx':Sx, 'Sy':Sy, 'Sz':Sz}
return pd.DataFrame(d)
def SolveTwoUnknowns(a1, b1, c1, a2, b2, c2):
D = a1*b2 - a2*b1
Dx = c1*b2 - c2*b1
Dy = a1*c2 - a2*c1
Sx = Dx/D
Sy = Dy/D
d = {'Sx':Sx, 'Sy':Sy}
return pd.DataFrame(d)
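# Both solvers above apply Cramer's rule element-wise, so they accept aligned
# pandas Series for each coefficient. Illustrative check (not original code):
# the system x + y = 3, x - y = 1 has the solution x = 2, y = 1, so
#   SolveTwoUnknowns(pd.Series([1]), pd.Series([1]), pd.Series([3]),
#                    pd.Series([1]), pd.Series([-1]), pd.Series([1]))
# returns a one-row DataFrame with Sx == 2.0 and Sy == 1.0.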
def CreateOutputFolder(case_folder):
path = case_folder / "extra_outputs"
if not os.path.exists(path):
os.makedirs(path)
######################################
# CreatingBaseLoad.R
def CreateBaseLoad(years, regions, output_folder, path_growthrate):
path_processed = path_in
path_result = output_folder.__str__()
years = years
regions = regions
path_growthrate = path_growthrate
## Method 3: annually
EFS_2020_LoadProf = pd.read_parquet(path_in + "\EFS_REF_load_2020.parquet")
EFS_2020_LoadProf = pd.merge(EFS_2020_LoadProf, pop, on = ["State"])
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["LoadMW"]*EFS_2020_LoadProf["State Prop"])
EFS_2020_LoadProf = EFS_2020_LoadProf.groupby(["Year", "GenX.Region", "LocalHourID", "Sector", "Subsector"], as_index = False).agg({"weighted" : "sum"})
# Read in 2019 Demand
Original_Load_2019 = pd.read_parquet(path_in + "\ipm_load_curves_2019_EST.parquet")
# Reorganize Demand
Original_Load_2019 = Original_Load_2019.melt(id_vars="LocalHourID").rename(columns={"variable" : "GenX.Region", "value": "LoadMW_original"})
Original_Load_2019 = Original_Load_2019.groupby(["LocalHourID"], as_index = False).agg({"LoadMW_original" : "sum"})
ratio_A = Original_Load_2019["LoadMW_original"].sum() / EFS_2020_LoadProf["weighted"].sum()
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["weighted"]*ratio_A)
Base_Load_2019 = EFS_2020_LoadProf.rename(columns ={"weighted" : "LoadMW"})
# Read in the Growth Rate
GrowthRate = pd.read_parquet(path_in + "\ipm_growthrate_2019.parquet")
try:
GrowthRate = pd.read_parquet(path_growthrate)
except:
pass
# Create Base loads
Base_Load_2019 = Base_Load_2019[Base_Load_2019["GenX.Region"].isin(regions)]
Base_Load_2019.loc[(Base_Load_2019["Sector"] == "Industrial") & (Base_Load_2019["Subsector"].isin(["process heat", "machine drives"])), "Subsector"] = "other"
Base_Load_2019 = Base_Load_2019[Base_Load_2019["Subsector"] == "other"]
Base_Load_2019 = Base_Load_2019.groupby(["Year", "LocalHourID", "GenX.Region", "Sector"], as_index= False).agg({'LoadMW' : 'sum'})
Base_Load = Base_Load_2019
for y in years:
ScaleFactor = GrowthRate.assign(ScaleFactor = (1+GrowthRate["growth_rate"])**(int(y) - 2019)) \
.drop(columns = "growth_rate")
Base_Load_temp = pd.merge(Base_Load_2019, ScaleFactor, on = ["GenX.Region"])
Base_Load_temp = Base_Load_temp.assign(Year = y, LoadMW = Base_Load_temp["LoadMW"]*Base_Load_temp["ScaleFactor"])\
.drop(columns = "ScaleFactor")
Base_Load = Base_Load.append(Base_Load_temp, ignore_index=True)
Base_Load.to_parquet(path_result + "\Base_Load.parquet", index = False)
del Base_Load, Base_Load_2019, Base_Load_temp, ScaleFactor,GrowthRate, Original_Load_2019
#####################################
# Add_Electrification.R
def AddElectrification(years, regions, electrification, output_folder, path_stock):
path_processed = path_in
path_result = output_folder.__str__()
path_stock = path_stock
years = years
electrification = electrification
regions = regions
#Creating Time-series
SCENARIO_STOCK = pd.read_parquet(path_processed + "\SCENARIO_STOCK.parquet")
SCENARIO_STOCK = SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"].isin(years)) & (SCENARIO_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK_temp = pd.DataFrame()
for year, case in zip(years, electrification):
SCENARIO_STOCK_temp = SCENARIO_STOCK_temp.append(SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"] == year) & (SCENARIO_STOCK["SCENARIO"] == case)])
SCENARIO_STOCK = SCENARIO_STOCK_temp
del SCENARIO_STOCK_temp
try:
CUSTOM_STOCK = pd.read_parquet(path_stock)
CUSTOM_STOCK = CUSTOM_STOCK[(CUSTOM_STOCK["YEAR"].isin(years)) & (CUSTOM_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK = SCENARIO_STOCK.append(CUSTOM_STOCK)
except:
pass
#Method 1 Calculate from Type1 and Type 2
for i in range(0, Nsubsector):
timeseries = pd.read_parquet(path_processed + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Incremental_Factor.parquet")
timeseries = timeseries[["State", "Year", "LocalHourID", "Unit", "Factor_Type1", "Factor_Type2" ]]
stock_temp = SCENARIO_STOCK[(SCENARIO_STOCK["SECTOR"] == running_sector[i]) & (SCENARIO_STOCK["SUBSECTOR"] == running_subsector[i])]
stock_temp = stock_temp[["SCENARIO", "STATE", "YEAR", "AGG_STOCK_TYPE1", "AGG_STOCK_TYPE2"]].rename(columns={"STATE" : "State", "YEAR" : "Year"})
years_pd = pd.Series(years)
IF_years = pd.Series(timeseries["Year"].unique())
for year in years_pd:
exists = year in IF_years.values
if not exists:
diff = np.array(IF_years - year)
index = diff[np.where(diff <= 0)].argmax()
year_approx = IF_years[index]
timeseries_temp = timeseries[timeseries["Year"] == year_approx]
timeseries_temp["Year"] = year
logger.warning("No incremental factor available for year " + str(year) + ": using factors from year " + str(year_approx) + ".")
timeseries = timeseries.append(timeseries_temp)
timeseries = pd.merge(timeseries, stock_temp, on = ["State", "Year"])
timeseries = timeseries.assign(LoadMW = timeseries["AGG_STOCK_TYPE1"]*timeseries["Factor_Type1"] + timeseries["AGG_STOCK_TYPE2"]*timeseries["Factor_Type2"])
timeseries = timeseries[["SCENARIO", "State", "Year", "LocalHourID", "LoadMW"]].dropna()
timeseries.to_parquet(path_result + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Scenario_Timeseries_Method1.parquet", index = False)
del timeseries, stock_temp
##########################
# Read in time series and combine them
Method = "Method1"
Res_SPH = | pd.read_parquet(path_result + "\Residential_space heating and cooling_Scenario_Timeseries_" + Method + ".parquet") | pandas.read_parquet |
import math
import pandas as pd
import os
from datetime import datetime, timedelta
import csv
def OpenFile(filePath):
"""
Opens a shapefile in QGIS
Parameters
----------
filePath :STRING
Path to the shapefile to open
Returns
-------
layer : QgsVectorLayer
"""
layer = iface.addVectorLayer(filePath,"shape","ogr")
if not layer:
print ('Could not open %s' % (filePath))
return None
else:
print ('Opened %s' % (filePath))
return layer
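# Usage sketch (illustrative; assumes it runs inside the QGIS Python console so
# that `iface` is available, and the shapefile path is hypothetical):
#   layer = OpenFile(r"C:\data\parcels.shp")
#   if layer is not None:
#       df = makeDataFrame(layer)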
def makeDataFrame(layer):
"""
Prepare pandas dataframe from layer, containing specified field in the dataframe
Parameters
----------
layer(string): layer to covert to dataframe
Returns
-------
df (pandas dataframe)
"""
columns=[]
caps = layer.dataProvider().capabilities()
if caps & QgsVectorDataProvider.DeleteAttributes:
fields = layer.dataProvider().fields()
for field in fields:
columns.append(field.name())
df= | pd.DataFrame(columns=columns) | pandas.DataFrame |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
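        # e.g. (illustrative): for start_date 2015-01-20 this is the number
        # of trading days from 2015-01-05 through 2015-01-20, inclusive.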
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
    split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, | pd.Timestamp('2015-01-07') | pandas.Timestamp |
from Happy_4 import solar_input
from Happy_4 import wind_input
import pandas as pd
'''
This file can use data from 2007 to 2012 to calculate average daily values.
'''
def leap_year_bool(yr):
    # Leap years are divisible by 4, except century years that are not also
    # divisible by 400.
    return yr % 4 == 0 and (yr % 100 != 0 or yr % 400 == 0)
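# Quick sanity check (illustrative, not part of the original module):
#   leap_year_bool(2000) -> True   (century divisible by 400)
#   leap_year_bool(1900) -> False  (century not divisible by 400)
#   leap_year_bool(2012) -> True
#   leap_year_bool(2011) -> False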
def average_daily_solar(lat, lon):
# collect the data of 2007, 2009, 2010, 2011
daily_solar_total = | pd.DataFrame() | pandas.DataFrame |
"""Tools used for clustering analysis"""
import csv
__author__ = "<NAME> (http://www.vmusco.com)"
import numpy
import os
import pandas
from mlperf.clustering.clusteringtoolkit import ClusteringToolkit
class DatasetFacts:
"""Object alternative to method read_dataset"""
def __init__(self, data):
self.data = data
self.file_path = None
def set_data(self, data):
self.data = data
def target(self):
return self.data.target
def ground_truth_cluster_ids(self):
return self.target().unique()
def nb_clusters(self):
return len(self.ground_truth_cluster_ids())
def data_without_target(self):
return self.data.loc[:, self.data.columns != 'target']
def nb_instances(self):
"""number of instances"""
return self.data.shape[0]
def nb_features(self):
"""number of features (excluding target)"""
return self.data.shape[1] - 1
@staticmethod
def read_dataset(source_file, sep='\t'):
chunksize = 100000
text_file_reader = pandas.read_csv(source_file, sep=sep, chunksize=chunksize, iterator=True)
data = pandas.concat(text_file_reader, ignore_index=True)
ret = DatasetFacts(data)
ret.file_path = source_file
return ret
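# Example usage (illustrative sketch; assumes a tab-separated file whose
# columns include a 'target' column holding the ground-truth cluster labels):
#   facts = DatasetFacts.read_dataset('dataset.tsv')
#   k = facts.nb_clusters()           # number of distinct target labels
#   X = facts.data_without_target()   # feature matrix ready for clustering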
def run_for_nr(run_base, variant, algorithm, run_id):
return "{}-{}-{}".format(variant, algorithm, run_id)
def read_dataset(source_file):
print("Reading file {}...".format(source_file))
chunksize = 100000
text_file_reader = | pandas.read_csv(source_file, sep='\t', chunksize=chunksize, iterator=True) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Detect the distribution of all features in the different diseases.
Note: for end_all_1230_ch only.
"""
import numpy as np
import pandas
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import f_load_data
import f_parameters
import f_preprocess
from sklearn.utils import shuffle
import f_single_feature_distribution
def heatmap(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
data = f_preprocess.data_normalization(data, have_target=True)
print("data.shape ->", data.shape)
important[0].append(data.shape[1] - 1)
select_col = []
for i in range(len(important[0])):
select_col.append(data.columns[important[0][i]])
data_selected = pandas.DataFrame(data, columns=select_col)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
size = len(data_selected.columns)
plt.subplots(figsize=(size, size))
sns.heatmap(data_selected.corr(), annot=True, vmax=1, square=True,
yticklabels=data_selected.columns.values.tolist(),
xticklabels=data_selected.columns.values.tolist(), cmap="RdBu")
plt.title("heatmap")
plt.show()
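# Example call (illustrative): `important` is expected to be a nested list
# whose first element holds the integer positions of the feature columns to
# keep, e.g.
#   heatmap(data, important=[[0, 3, 5]])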
def multi_feature(data, important):
print("multi-feature distribution...")
# f_preprocessing
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
data = f_preprocess.data_normalization(data, have_target=True)
print("data.shape ->", data.shape)
important[0].append(data.shape[1] - 1)
select_col = []
for i in range(len(important[0])):
select_col.append(data.columns[important[0][i]])
data_selected = pandas.DataFrame(data, columns=select_col)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
size = len(data_selected.columns)
# Andrews Curves involve using attributes of samples as coefficients for Fourier series and then plotting these
| pd.plotting.andrews_curves(data_selected, data_selected.columns[size - 1], color=["green", "red"]) | pandas.plotting.andrews_curves |
from chachies import chachifuncs as ccf
from chachies.descriptors import process
from chachies.descriptors import fitters
import glob
import os
import pandas as pd
import pickle
from sklearn import linear_model
from sklearn import preprocessing
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
# Import data
k_list = [f for f in glob.glob('data/K_descriptors/*.xlsx')]
c_list = [f for f in glob.glob('data/C_descriptors/*.xlsx')]
c_data = pd.DataFrame()
for each in c_list:
df = pd.read_excel(each)
c_data = c_data.append(df, ignore_index=True)
k_data = pd.DataFrame()
for each in k_list:
df = pd.read_excel(each)
k_data = k_data.append(df, ignore_index=True)
data = c_data.append(k_data)
data = data.T.drop_duplicates().T
# reset index and provide 0 1 classifiers
data = data.reset_index(drop=True)
for i in range(len(data)):
if data.loc[i, ('names')].startswith('CS2_33'):
data.loc[i, ('label')] = 'LiCoO2'
data.loc[i, ('lasso')] = 0
else:
data.loc[i, ('label')] = 'LiFePO4'
data.loc[i, ('lasso')] = 1
# split data
train, test = train_test_split(data, test_size=0.2, random_state=1010)
# choose data
train_y = train['lasso'] # what are we predicting
test_y = test['lasso']
train_x = train[['ch_5', 'ch_7', 'dc_5']] # from LASSO
test_x = test[['ch_5', 'ch_7', 'dc_5']]
train_x_scaled = preprocessing.normalize(train_x, norm='l1')
test_x_scaled = preprocessing.normalize(test_x, norm='l1')
# Fit on the scaled features so that training and prediction use the same
# representation.
lin_svc = svm.LinearSVC().fit(train_x_scaled, train_y)
trainpred = lin_svc.predict(train_x_scaled)  # predict on train data
testpred = lin_svc.predict(test_x_scaled)  # predict on test data
filename = 'svc_model.sav'
pickle.dump(lin_svc, open(filename, 'wb'))
# load model from disk
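# e.g. (illustrative sketch, not part of the original script):
#   loaded_svc = pickle.load(open(filename, 'rb'))
#   loaded_svc.predict(test_x_scaled)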
class chachies_class:
def clean(rootdir, path_to_raw_data_folder):
'''Gets all raw data from the rootdir (ie 'data/') and specified folder
(path_to_raw_data_folder), i.e. 'Source_Data' (which is within rootdir),
and then:
1. separates it into raw cycles and puts them in a folder
(data/Separated_Cycles/)
2. cleans those separated cycles and puts them in a folder
(data/Clean_Separated_Cycles/)
3. recombines the cleaned, separated cycles and saves those
data sets in a folder (data/Clean_Whole_Sets/). These folders
do not have to have existed previously.'''
assert os.path.exists(rootdir) == 1
if not os.path.exists(rootdir):
print('The specified rootdir does not exist.')
if not os.path.exists(rootdir+'Separated_Cycles/'):
os.makedirs(rootdir+'Separated_Cycles/')
if not os.path.exists(rootdir+'Clean_Separated_Cycles/'):
os.makedirs(rootdir + 'Clean_Separated_Cycles/')
if not os.path.exists(rootdir + 'Clean_Whole_Sets/'):
os.makedirs(rootdir + 'Clean_Whole_Sets/')
ccf.load_sep_cycles(rootdir + path_to_raw_data_folder, rootdir + 'Separated_Cycles/')
ccf.get_clean_cycles(rootdir + 'Separated_Cycles/',rootdir + 'Clean_Separated_Cycles/')
ccf.get_clean_sets(rootdir + 'Clean_Separated_Cycles/', rootdir+'Clean_Whole_Sets/')
return
def get_descriptors(import_filepath):
"""Generates a dataframe containing charge and discharge
descriptors/error parameters. Also writes descriptors to an
excel spreadsheet 'describe.xlsx' import_filepath = filepath
containing cleaned separated cycles"""
# checks that the file exists
assert os.path.exists(import_filepath), 'The file does not exist'
# check that the whatever is passed to ML_generate is a string
assert isinstance(import_filepath, str), 'The input should be a string'
# creates dataframe of descriptors for the charge/discharge
# cycles of all batteries
df_ch = process.df_generate(import_filepath, 'c')
df_dc = process.df_generate(import_filepath, 'd')
# concats charge and discharge cycles
df_final = | pd.concat([df_ch, df_dc], axis=1) | pandas.concat |
from typing import List, Dict, Tuple, Sequence, Union
from sympy import Eq, sympify, IndexedBase, Expr, Symbol, expand, Indexed
import pandas as pd
import numpy as np
from sympy.logic.boolalg import BooleanFalse, BooleanTrue
from finstmt.items.config import ItemConfig
PLUG_SCALE = 1e11
def sympy_dict_to_results_dict(
s_dict: Dict[IndexedBase, float],
forecast_dates: pd.DatetimeIndex,
item_configs: List[ItemConfig],
t_offset: int = 0
) -> Dict[str, pd.Series]:
item_config_dict: Dict[str, ItemConfig] = {config.key: config for config in item_configs}
new_results = {}
for expr in s_dict:
key = str(expr.base)
try:
config = item_config_dict[key]
except KeyError:
# Must be pct of item, don't need in final results
continue
new_results[key] = | pd.Series(index=forecast_dates, dtype='float', name=config.primary_name) | pandas.Series |
"""the simple baseline for autograph"""
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, JumpingKnowledge
from torch_geometric.data import Data
from torch_geometric.nn import Node2Vec
from torch.utils.data import DataLoader
import networkx as nx
import random
from collections import Counter
from utils import normalize_features
import scipy.sparse as sp
from appnp import APPNPTrainer
from daydayup_model import GCNTrainer, TAGTrainer, XGBTrainer
from scipy import stats
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
from daydayup_private_features import dayday_feature, dayday_feature_old
def fix_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
fix_seed(1234)
class Model:
def __init__(self):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def generate_pyg_data_appnp(self, data, x, edge_index):
graph = nx.from_edgelist(edge_index)
features= normalize_features(x)
num_nodes = features.shape[0]
target = np.zeros(num_nodes, dtype=np.long)
inds = data['train_label'][['node_index']].to_numpy()
train_y = data['train_label'][['label']].to_numpy()
target[inds] = train_y
train_indices = data['train_indices']
test_indices = data['test_indices']
return graph, features, target, train_indices, test_indices
def generate_pyg_data_gcn(self, data, x, edge_index):
x = torch.tensor(x, dtype=torch.float)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_weight = data['edge_file']['edge_weight'].to_numpy()
edge_weight = torch.tensor(edge_weight, dtype=torch.float32)
num_nodes = x.size(0)
y = torch.zeros(num_nodes, dtype=torch.long)
inds = data['train_label'][['node_index']].to_numpy()
train_y = data['train_label'][['label']].to_numpy()
y[inds] = torch.tensor(train_y, dtype=torch.long)
train_indices = data['train_indices']
test_indices = data['test_indices']
data = Data(x=x, edge_index=edge_index, y=y, edge_weight=edge_weight)
data.num_nodes = num_nodes
train_mask = torch.zeros(num_nodes, dtype=torch.bool)
train_mask[train_indices] = 1
data.train_mask = train_mask
test_mask = torch.zeros(num_nodes, dtype=torch.bool)
test_mask[test_indices] = 1
data.test_mask = test_mask
return data
def train_predict(self, data, time_budget, n_class, schema):
flag_feature = 1
sp_density = 0.0
flag_zero = 1
x = data['fea_table']
if x.shape[1] == 1:
x = x.to_numpy()
x = x.reshape(x.shape[0])
x = np.array(pd.get_dummies(x), dtype=np.float)
flag_feature = 0
else:
x.replace([np.inf, -np.inf], np.nan, inplace=True)
x.fillna(0, inplace=True)
x = x.drop('node_index', axis=1).to_numpy()
x_max = x.max()
x_min = x.min()
if x_max == x_min:
x = np.arange(x.shape[0])
x = np.array( | pd.get_dummies(x) | pandas.get_dummies |
import re
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf import melt as cudf_melt
from cudf.core import DataFrame
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
)
@pytest.mark.parametrize("num_id_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_value_vars", [0, 1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_melt(nulls, num_id_vars, num_value_vars, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some", "all"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
id_vars = []
for i in range(num_id_vars):
colname = "id" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
id_vars.append(colname)
value_vars = []
for i in range(num_value_vars):
colname = "val" + str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
elif nulls == "all":
data[:] = np.nan
pdf[colname] = data
value_vars.append(colname)
gdf = DataFrame.from_pandas(pdf)
got = cudf_melt(frame=gdf, id_vars=id_vars, value_vars=value_vars)
got_from_melt_method = gdf.melt(id_vars=id_vars, value_vars=value_vars)
expect = pd.melt(frame=pdf, id_vars=id_vars, value_vars=value_vars)
# pandas' melt makes the 'variable' column of 'object' type (string)
# cuDF's melt makes it Categorical because it doesn't support strings
expect["variable"] = expect["variable"].astype("category")
assert_eq(expect, got)
assert_eq(expect, got_from_melt_method)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize(
"dtype",
list(NUMERIC_TYPES + DATETIME_TYPES)
+ [pytest.param("str", marks=pytest.mark.xfail())],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_df_stack(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame()
for i in range(num_cols):
colname = str(i)
data = np.random.randint(0, 26, num_rows).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.stack()
expect = pdf.stack()
if {None} == set(expect.index.names):
expect.rename_axis(
list(range(0, len(expect.index.names))), inplace=True
)
assert_eq(expect, got)
pass
@pytest.mark.parametrize("num_rows", [1, 2, 10, 1000])
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize(
"dtype", NUMERIC_TYPES + DATETIME_TYPES + ["category"]
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_interleave_columns(nulls, num_cols, num_rows, dtype):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(0, 26, num_rows)).astype(dtype)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
if dtype == "category":
with pytest.raises(ValueError):
assert gdf.interleave_columns()
else:
got = gdf.interleave_columns()
expect = pd.Series(np.vstack(pdf.to_numpy()).reshape((-1,))).astype(
dtype
)
assert_eq(expect, got)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 1000])
@pytest.mark.parametrize("count", [1, 2, 10])
@pytest.mark.parametrize("dtype", ALL_TYPES)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_tile(nulls, num_cols, num_rows, dtype, count):
if dtype not in ["float32", "float64"] and nulls in ["some"]:
pytest.skip(msg="nulls not supported in dtype: " + dtype)
pdf = pd.DataFrame(dtype=dtype)
for i in range(num_cols):
colname = str(i)
data = pd.Series(np.random.randint(num_cols, 26, num_rows)).astype(
dtype
)
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = np.nan
pdf[colname] = data
gdf = DataFrame.from_pandas(pdf)
got = gdf.tile(count)
expect = pd.DataFrame( | pd.concat([pdf] * count) | pandas.concat |
import pandas as pd
def calculo_juros_compostos(montante_inicial, aportes_mensais, taxa_mensal, tempo_meses):
df = | pd.DataFrame({'Capital' : [montante_inicial], 'Juros Mensais': [0.0], 'Juros Acumulados': [0.0], 'Valor Aportado' : [montante_inicial]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 10:01:17 2020
@author: abhi0
"""
##############################################################################
############# Loading the libraries and files #############################
#############################################################################
from numpy import array
import json
from fastai.text import *
import numpy as np
from sklearn.feature_selection import chi2
import ktrain
from ktrain import text
#Loading the files:
def parse_data(file):
for l in open(file,'r'):
yield json.loads(l)
data =\
list(parse_data('C:/Users/abhi0/OneDrive/Documents/Data_on_Sattire_and_Sarcasm/Sarcasm_Headlines_Dataset.json'))
#Not used, to avoid severe class imbalance: if added without also enlarging the
#sarcasm data-set, the model would not generalise. Leaving it out keeps a good
#ratio between the two classes.
data2=\
list(parse_data('C:/Users/abhi0/OneDrive/Documents/Data_on_Sattire_and_Sarcasm/Sarcasm_Headlines_Dataset_v2.json'))
#Extracting the labels, indices and headlines corresponding to sarcastic
#statements for the first set
sarIdx=[]
Headline=[]
Labels=[]
for i in range(len(data)):
if data[i]['is_sarcastic']==1:
sarIdx.append(i)
Headline.append(data[i]['headline'])
Labels.append('Sarcasm')
###'sattire' data-set:
import pandas as pd
data3= | pd.read_csv('C:/Users/abhi0/OneDrive/Documents/Data_on_Sattire_and_Sarcasm/OnionOrNot.csv') | pandas.read_csv |
#!/home/cab22/miniconda3/bin/python
#SBATCH --account=commons
#SBATCH --export=All
#SBATCH --partition=commons
#SBATCH --time=24:00:00
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --time=24:00:00
#SBATCH --export=ALL
#SBATCH --array=0-15
#SBATCH --mem=16G
import os
import subprocess
import itertools
import numpy as np
import warnings
import pandas
import time
import argparse
class SlurmJobArray():
""" Selects a single condition from an array of parameters using the SLURM_ARRAY_TASK_ID environment variable. The parameters need to be supplied as a dictionary. if the task is not in a slurm environment, the test parameters will supersede the parameters, and the job_id would be taken as 0. Example:
parameters={"epsilon":[100],
"aligned":[True,False],
"actinLen":[20,40,60,80,100,120,140,160,180,200,220,240,260,280,300],
"repetition":range(5),
"temperature":[300],
"system2D":[False],
"simulation_platform":["OpenCL"]}
test_parameters={"simulation_platform":"CPU"}
sjob=SlurmJobArray("ActinSimv6", parameters, test_parameters)
:var test_run: Boolean: This simulation is a test
:var job_id: SLURM_ARRAY_TASK_ID
:var all_parameters: Parameters used to initialize the job
:var parameters: Parameters for this particular job
:var name: The name (and relative path) of the output
"""
def __init__(self, name, parameters, test_parameters={},test_id=0):
"""
Args:
name:
parameters:
Returns:
name:
parameters:
"""
self.all_parameters=parameters
self.test_parameters=test_parameters
#Parse the slurm variables
self.slurm_variables={}
for key in os.environ:
if len(key.split("_"))>1 and key.split("_")[0]=='SLURM':
self.slurm_variables.update({key:os.environ[key]})
#Check if there is a job id
self.test_run=False
try:
self.job_id=int(self.slurm_variables["SLURM_ARRAY_TASK_ID"])
except KeyError:
self.test_run=True
warnings.warn("Test Run: SLURM_ARRAY_TASK_ID not in environment variables")
self.job_id=test_id
keys=parameters.keys()
self.all_conditions=list(itertools.product(*[parameters[k] for k in keys]))
self.parameter=dict(zip(keys,self.all_conditions[self.job_id]))
#The name only includes enough information to differentiate the simulations.
self.name=f"{name}_{self.job_id:03d}_" + '_'.join([f"{a[0]}_{self[a]}" for a in self.parameter if len(self.all_parameters[a])>1])
def __getitem__(self, name):
if self.test_run:
try:
return self.test_parameters[name]
except KeyError:
return self.parameter[name]
else:
return self.parameter[name]
def __getattr__(self, name: str):
""" The keys of the parameters can be called as attributes
"""
if name in self.__dict__:
return object.__getattribute__(self, name)
elif name in self.parameter:
return self[name]
else:
return object.__getattribute__(self, name)
def __repr__(self):
return str(self.parameter)
def keys(self):
        return str(self.parameter.keys())
def print_parameters(self):
print(f"Number of conditions: {len(self.all_conditions)}")
print("Running Conditions")
for k in self.parameter.keys():
print(f"{k} :", f"{self[k]}")
print()
def print_slurm_variables(self):
print("Slurm Variables")
for key in self.slurm_variables:
print (key,":",self.slurm_variables[key])
print()
def write_csv(self, out=""):
s=pandas.concat([pandas.Series(self.parameter), pandas.Series(self.slurm_variables)])
s['test_run']=self.test_run
s['date']=time.strftime("%Y_%m_%d")
s['name']=self.name
s['job_id']=self.job_id
if out=='':
s.to_csv(self.name+'.param')
else:
s.to_csv(out)
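
# A minimal usage sketch of SlurmJobArray (illustration only; the parameter grid and the
# SJOB_DEMO switch below are made up and not part of the original workflow).
# Outside SLURM this falls back to the test parameters and job_id = 0.
if __name__ == '__main__' and os.environ.get('SJOB_DEMO'):
    demo_parameters = {"epsilon": [100],
                       "actinLen": [20, 40, 60],
                       "repetition": range(3)}
    demo_test_parameters = {"simulation_platform": "CPU"}
    sjob = SlurmJobArray("ActinDemo", demo_parameters, demo_test_parameters)
    sjob.print_parameters()
    sjob.print_slurm_variables()
    sjob.write_csv()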
################
# Coarse Actin #
################
#!/usr/bin/python3
"""
Coarse Actin simulations using a custom
"""
import openmm
import openmm.app
from simtk import unit
import numpy as np
import pandas
import sklearn.decomposition
import configparser
import prody
import scipy.spatial.distance as sdist
import os
import sys
__author__ = '<NAME>'
__version__ = '0.2'
#__location__ = os.path.realpath(
# os.path.join(os.getcwd(), os.path.dirname(__file__)))
#__location__="/scratch/cab22/Bundling/Persistence_length/Persistence_length"
__location__='.'
_ef = 1 * unit.kilocalorie / unit.kilojoule # energy scaling factor
_df = 1 * unit.angstrom / unit.nanometer # distance scaling factor
_af = 1 * unit.degree / unit.radian # angle scaling factor
def parseConfigTable(config_section):
"""Parses a section of the configuration file as a table"""
def readData(config_section, a):
"""Filters comments and returns values as a list"""
temp = config_section.get(a).split('#')[0].split()
l = []
for val in temp:
val = val.strip()
try:
x = int(val)
l += [x]
except ValueError:
try:
y = float(val)
l += [y]
except ValueError:
l += [val]
return l
data = []
for a in config_section:
if a == 'name':
columns = readData(config_section, a)
elif len(a) > 3 and a[:3] == 'row':
data += [readData(config_section, a)]
else:
print(f'Unexpected row {readData(config_section, a)}')
return pandas.DataFrame(data, columns=columns)
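
# A minimal sketch (made-up [Particles] section, illustration only) of the table layout
# parseConfigTable expects: a 'name' entry lists the column names and each 'row*' entry
# holds one row of whitespace-separated values.
_demo_cfg = configparser.ConfigParser()
_demo_cfg.read_string("[Particles]\n"
                      "name = type mass radius\n"
                      "row1 = A    1.0  2.5\n"
                      "row2 = B    2.0  3.0\n")
_demo_table = parseConfigTable(_demo_cfg['Particles'])
# _demo_table is a DataFrame with columns ['type', 'mass', 'radius'] and two rows.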
# Random rotation matrix
def random_rotation():
"""Generate a 3D random rotation matrix.
Returns:
np.matrix: A 3D rotation matrix.
"""
x1, x2, x3 = np.random.rand(3)
R = np.matrix([[np.cos(2 * np.pi * x1), np.sin(2 * np.pi * x1), 0],
[-np.sin(2 * np.pi * x1), np.cos(2 * np.pi * x1), 0],
[0, 0, 1]])
v = np.matrix([[np.cos(2 * np.pi * x2) * np.sqrt(x3)],
[np.sin(2 * np.pi * x2) * np.sqrt(x3)],
[np.sqrt(1 - x3)]])
H = np.eye(3) - 2 * v * v.T
M = -H * R
return M
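
# Illustrative sanity check (not part of the original script): random_rotation should
# return a proper rotation, i.e. an orthonormal matrix with determinant +1.
def _check_random_rotation():
    R = random_rotation()
    assert np.allclose(R @ R.T, np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)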
# Optimal rotation matrix
# The longest coordinate is X, then Y, then Z.
def optimal_rotation(coords):
c = coords.copy()
c -= c.mean(axis=0)
pca = sklearn.decomposition.PCA()
pca.fit(c)
# Change rotoinversion matrices to rotation matrices
rot = pca.components_[[0, 1, 2]]
if np.linalg.det(rot) < 0:
rot = -rot
#print(rot, np.linalg.det(rot))
return rot
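
# Illustrative sketch (toy data, not part of the original script): after optimal_rotation,
# projecting the centred points onto the returned axes should order the variances X >= Y >= Z.
def _check_optimal_rotation():
    pts = np.random.randn(500, 3) * np.array([5.0, 2.0, 0.5])
    rot = optimal_rotation(pts)
    aligned = (pts - pts.mean(axis=0)) @ rot.T
    v = np.asarray(aligned).var(axis=0)
    assert v[0] >= v[1] >= v[2]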
class SystemData:
def __init__(self, atoms, bonds=None, angles=None, dihedrals=None, impropers=None):
self.atoms = atoms
self.atoms.index = np.arange(1, len(self.atoms) + 1)
self.masses = atoms[['type', 'mass']].drop_duplicates()
self.masses.index = np.arange(1, len(self.masses) + 1)
self.n_atoms = len(self.atoms)
self.n_atomtypes = len(self.masses)
if bonds is not None:
self.bonds = bonds
self.bonds.index = np.arange(1, len(self.bonds) + 1)
self.bondtypes = bonds[['type', 'x0', 'k']].drop_duplicates()
self.bondtypes.index = np.arange(1, len(self.bondtypes) + 1)
self.n_bonds = len(self.bonds)
self.n_bondtypes = len(self.bondtypes)
else:
self.bonds = pandas.DataFrame()
self.bondtypes = pandas.DataFrame()
self.n_bonds = 0
self.n_bondtypes = 0
if angles is not None:
self.angles = angles
self.angles.index = np.arange(1, len(self.angles) + 1)
self.angletypes = angles[['type', 'x0', 'k']].drop_duplicates()
self.angletypes.index = np.arange(1, len(self.angletypes) + 1)
self.n_angles = len(self.angles)
self.n_angletypes = len(self.angletypes)
else:
self.angles = pandas.DataFrame()
self.angletypes = pandas.DataFrame()
self.n_angles = 0
self.n_angletypes = 0
if dihedrals is not None:
self.dihedrals = dihedrals
self.dihedrals.index = np.arange(1, len(self.dihedrals) + 1)
self.dihedraltypes = dihedrals[['type', 'x0', 'k']].drop_duplicates()
self.dihedraltypes.index = np.arange(1, len(self.dihedraltypes) + 1)
self.n_dihedrals = len(self.dihedrals)
self.n_dihedraltypes = len(self.dihedraltypes)
else:
self.dihedrals = pandas.DataFrame()
self.dihedraltypes = | pandas.DataFrame() | pandas.DataFrame |
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
        # this is allowed but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
        # test using a different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
        # test using a different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
        # same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
        # non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
        df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
        # non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# deleted number (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non - empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
        # selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = self.store.select('p4d',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']), Term('items', '=', ['ItemA','ItemB']) ])
expected = p4d.truncate(after='20000108').reindex(minor=['A', 'B'],items=['ItemA','ItemB'])
tm.assert_panel4d_equal(result, expected)
# valid terms
terms = [
dict(field = 'major_axis', op = '>', value = '20121114'),
('major_axis', '20121114'),
('major_axis', '>', '20121114'),
(('major_axis', ['20121114','20121114']),),
('major_axis', datetime(2012,11,14)),
'major_axis>20121114',
'major_axis>20121114',
'major_axis>20121114',
(('minor_axis', ['A','B']),),
(('minor_axis', ['A','B']),),
((('minor_axis', ['A','B']),),),
(('items', ['ItemA','ItemB']),),
('items=ItemA'),
]
for t in terms:
self.store.select('wp', t)
self.store.select('p4d', t)
# valid for p4d only
terms = [
(('labels', '=', ['l1','l2']),),
Term('labels', '=', ['l1','l2']),
]
for t in terms:
self.store.select('p4d', t)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0.,1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r : tm.assert_series_equal(l, r, True, True, True)
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
from datetime import date
ser = Series(values, [date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime(2012, 1, 1), datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failer on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
# not consolidated
df['foo'] = np.random.randn(len(df))
self.store['df'] = df
recons = self.store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
finally:
store.close()
os.remove(self.scratchpath)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
try:
store = HDFStore(self.scratchpath)
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ['foo', 'bar'])
finally:
store.close()
os.remove(self.scratchpath)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
try:
store = HDFStore(self.scratchpath)
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
try:
store = HDFStore(self.scratchpath)
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
finally:
store.close()
os.remove(self.scratchpath)
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
self.store['obj'] = df1
tm.assert_frame_equal(self.store['obj'], df1)
self.store['obj'] = df2
tm.assert_frame_equal(self.store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, tm.assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, tm.assert_panel_equal)
def test_wide_table_dups(self):
wp = tm.makePanel()
try:
store = HDFStore(self.scratchpath)
store._quiet = True
store.put('panel', wp, table=True)
store.put('panel', wp, table=True, append=True)
recons = store['panel']
tm.assert_panel_equal(recons, wp)
finally:
store.close()
os.remove(self.scratchpath)
def test_long(self):
def _check(left, right):
tm.assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
self.store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
self.store['a'] = ts
tm.assert_series_equal(self.store['a'], ts)
def test_select(self):
wp = tm.makePanel()
# put/select ok
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.store.select('wp')
# non-table ok (where = None)
self.store.remove('wp')
self.store.put('wp2', wp, table=False)
self.store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100), items = [ 'Item%03d' % i for i in xrange(100) ],
major_axis=date_range('1/1/2000', periods=100), minor_axis = [ 'E%03d' % i for i in xrange(100) ])
self.store.remove('wp')
self.store.append('wp', wp)
items = [ 'Item%03d' % i for i in xrange(80) ]
result = self.store.select('wp', Term('items', items))
expected = wp.reindex(items = items)
tm.assert_panel_equal(expected, result)
        # selecting non-table with a where
#self.assertRaises(Exception, self.store.select,
# 'wp2', ('column', ['A', 'D']))
def test_panel_select(self):
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis','>=',date)
crit2 = ('minor_axis', '=', ['A', 'D'])
result = self.store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
tm.assert_panel_equal(result, expected)
result = self.store.select('wp', [ 'major_axis>=20000124', ('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
self.store.put('frame', df, table=True)
date = df.index[len(df) // 2]
crit1 = ('index','>=',date)
crit2 = ('columns',['A', 'D'])
crit3 = ('columns','A')
result = self.store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = self.store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
        # other indices for a frame
# integer
df = DataFrame(dict(A = np.random.rand(20), B = np.random.rand(20)))
self.store.append('df_int', df)
self.store.select('df_int', [ Term("index<10"), Term("columns", "=", ["A"]) ])
df = DataFrame(dict(A = np.random.rand(20), B = np.random.rand(20), index = np.arange(20,dtype='f8')))
self.store.append('df_float', df)
self.store.select('df_float', [ Term("index<10.0"), Term("columns", "=", ["A"]) ])
# can't select if not written as table
#self.store['frame'] = df
#self.assertRaises(Exception, self.store.select,
# 'frame', [crit1, crit2])
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
self.store.put('frame', df, table=True)
crit = Term('columns', df.columns[:75])
result = self.store.select('frame', [crit])
| tm.assert_frame_equal(result, df.ix[:, df.columns[:75]]) | pandas.util.testing.assert_frame_equal |
# importing the necessary libraries
import matplotlib.pyplot as plt
import pandas as pd
import re
import random
import math
import numpy as np
random.seed(10)
"""
Read text data from file and pre-process text by doing the following
1. convert to lowercase
2. convert tabs to spaces
3. remove "non-word" characters
Store resulting "words" into an array
"""
FILENAME='SMSSpamCollection'
all_data = open(FILENAME).readlines()
# split into train and test
num_samples = len(all_data)
all_idx = list(range(num_samples))
random.shuffle(all_idx)
idx_limit = int(0.8*num_samples)
train_idx = all_idx[:idx_limit]
test_idx = all_idx[idx_limit:]
train_examples = [all_data[ii] for ii in train_idx]
test_examples = [all_data[ii] for ii in test_idx]
num_spam_lines = 0
num_ham_lines = 0
# Preprocess train and test examples
train_words = []
train_labels = []
test_words = []
test_labels = []
# train examples
for line in train_examples:
    line = line.strip('\r\n\t ') # remove trailing spaces, tabs and carriage returns
line = line.lower() # lowercase
line = line.replace("\t", ' ') # convert tabs to spae
line_words = re.findall(r'\w+', line)
line_words = [xx for xx in line_words if xx != ''] # remove empty words
label = line_words[0]
if label == "spam":
label = 1
num_spam_lines += 1 # increment the number of spam lines
else:
label = 0
num_ham_lines += 1 # increment the number of ham lines
line_words = line_words[1:]
train_words.append(line_words)
train_labels.append(label)
# test examples
for line in test_examples:
    line = line.strip('\r\n\t ') # remove trailing spaces, tabs and carriage returns
line = line.lower() # lowercase
line = line.replace("\t", ' ') # convert tabs to spae
line_words = re.findall(r'\w+', line)
line_words = [xx for xx in line_words if xx != ''] # remove empty words
label = line_words[0]
label = 1 if label == 'spam' else 0
line_words = line_words[1:]
test_words.append(line_words)
test_labels.append(label)
def nbayes_a():
spam_words = []
ham_words = []
alpha = 0.5
N = 20000
for ii in range(len(train_words)): # we pass through words in each (train) SMS
words = train_words[ii]
label = train_labels[ii]
if label == 1:
spam_words += words
else:
ham_words += words
input_words = spam_words + ham_words # all words in the input vocabulary
# Count spam and ham occurances for each word
spam_counts = {}; ham_counts = {}
# Spamcounts
for word in spam_words:
try:
word_spam_count = spam_counts.get(word)
spam_counts[word] = word_spam_count + 1
except:
spam_counts[word] = 1 + alpha # smoothening
for word in ham_words:
try:
word_ham_count = ham_counts.get(word)
ham_counts[word] = word_ham_count + 1
except:
ham_counts[word] = 1 + alpha # smoothening
num_spam = len(spam_words)
num_ham = len(ham_words)
# Training model starts here
p_spam = num_spam_lines / idx_limit # probability of spam
p_ham = num_ham_lines / idx_limit # probability of ham
p_wordgivenspam = {} # probability of each word given spam
p_wordgivenham = {} # probability of each word given ham
denominator_spam = num_spam + (alpha * N)
denominator_ham = num_ham + (alpha * N)
for word in spam_counts:
p_wordgivenspam[word] = (spam_counts[word] / denominator_spam)
for word in ham_counts:
p_wordgivenham[word] = (ham_counts[word] / denominator_ham)
# Training model ends here
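    # Worked arithmetic for the smoothed estimate above (toy numbers, illustration only):
    #   P(w | spam) = (count(w, spam) + alpha) / (num_spam + alpha * N)
    # e.g. with count = 3, alpha = 0.5, num_spam = 10000 and N = 20000:
    #   (3 + 0.5) / (10000 + 0.5 * 20000) = 3.5 / 20000 = 1.75e-4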
# Model run on test data
p_spamgivenline = []
# Calculating probability of spam given the message
for i in range(len(test_words)):
p_spamgivenline.append(p_spam)
for j in range(len(test_words[i])):
if test_words[i][j] in p_wordgivenspam.keys():
p_spamgivenline[i] = p_spamgivenline[i] * p_wordgivenspam[test_words[i][j]]
else:
num_spam += 1
p_wordgivenspam[test_words[i][j]] = alpha / denominator_spam
p_spamgivenline[i] = p_spamgivenline[i] * p_wordgivenspam[test_words[i][j]]
p_hamgivenline = []
# Calculating probability of ham given the message
for i in range(len(test_words)):
p_hamgivenline.append(p_ham)
for j in range(len(test_words[i])):
if test_words[i][j] in p_wordgivenham.keys():
p_hamgivenline[i] = p_hamgivenline[i] * p_wordgivenham[test_words[i][j]]
else:
num_ham += 1
p_wordgivenham[test_words[i][j]] = alpha / denominator_ham
p_hamgivenline[i] = p_hamgivenline[i] * p_wordgivenham[test_words[i][j]]
predicted_label = []
# Comparing the probability of spam and ham and appending labels accordingly
for x in range(len(p_spamgivenline)):
if (p_hamgivenline[x] > p_spamgivenline[x]):
predicted_label.append(0)
else:
predicted_label.append(1)
true_pos = 0
true_neg = 0
false_pos = 0
false_neg = 0
# Calculating true positive and negative, false positive and negative
for x in range(len(predicted_label)):
if predicted_label[x] == 0 and test_labels[x] == 0:
true_neg += 1
elif(predicted_label[x] == 1 and test_labels[x] == 1):
true_pos +=1
elif(predicted_label[x] == 0 and test_labels[x] == 1):
false_neg += 1
else:
false_pos += 1
total = true_neg + true_pos + false_neg + false_pos
print("\nTesting Accuracy:", (true_pos + true_neg) / total, "\n")
# Confusion Matrix
data = {'Positive': pd.Series([true_pos, false_neg, ''], index = ['Positive', 'Negative', '(Predicted)']),
'Negative': | pd.Series([false_pos, true_neg, ''], index = ['Positive', 'Negative', '(Predicted)']) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 15:34:32 2021
@author: Thomas
"""
import calendar
import glob
import pandas as pd
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import logging
import itertools
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
#modify to work with kfold
#def smoteAdataset(Xig, yig, test_size=0.2, random_state=0):
#def smoteAdataset(Xig_train, yig_train, Xig_test, yig_test):
# sm=SMOTE(random_state=2)
# Xig_train_res, yig_train_res = sm.fit_sample(Xig_train, yig_train.ravel())
# return Xig_train_res, pd.Series(yig_train_res), Xig_test, pd.Series(yig_test)
def create_logger():
logger_ = logging.getLogger('main')
logger_.setLevel(logging.DEBUG)
fh = logging.FileHandler('simple_lightgbm.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s]%(asctime)s:%(name)s:%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger_.addHandler(fh)
logger_.addHandler(ch)
def get_logger():
return logging.getLogger('main')
def lgb_multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
def multi_weighted_logloss(y_true, y_preds):
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds
y_ohe = pd.get_dummies(y_true)
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
y_p_log = np.log(y_p)
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
nb_pos = y_ohe.sum(axis=0).values.astype(float)
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return loss
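
# Illustrative sketch (not part of the original pipeline): with one toy example per class
# and near-perfect predictions, the weighted loss collapses to roughly -log(0.99) ~ 0.01.
def _demo_multi_weighted_logloss():
    classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
    y_toy = pd.Series(classes)
    p_toy = np.full((14, 14), 0.01 / 13)
    np.fill_diagonal(p_toy, 0.99)
    return multi_weighted_logloss(y_toy, p_toy)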
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
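
# Illustrative usage sketch (toy labels, not part of the original pipeline) for the
# plotting helper above.
def _demo_plot_confusion_matrix():
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix([0, 1, 1, 0, 1], [0, 1, 0, 0, 1])
    plt.figure(figsize=(4, 4))
    plot_confusion_matrix(cm, classes=['class_a', 'class_b'], normalize=True)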
def predict_chunk(df_, clfs_, meta_, features, train_mean):
df_, aux_df_ = preprocess_ts_df(df_)
auxs = make_features(df_, aux_df_)
    aggs = get_aggregations()
new_columns = get_new_columns(aggs)
agg_ = df_.groupby('object_id').agg(aggs)
agg_.columns = new_columns
agg_ = add_features_to_agg(df=agg_)
full_test = agg_.reset_index().merge(
right=meta_,
how='left',
on='object_id'
)
for aux in auxs:
full_test = pd.merge(full_test, aux, on='object_id', how='left')
full_test = postprocess_df(full_test)
#full_test = full_test.fillna(train_mean)
preds_ = None
for clf in clfs_:
if preds_ is None:
preds_ = clf.predict_proba(full_test[features]) / len(clfs_)
else:
preds_ += clf.predict_proba(full_test[features]) / len(clfs_)
preds_99 = np.ones(preds_.shape[0])
for i in range(preds_.shape[1]):
preds_99 *= (1 - preds_[:, i])
preds_df_ = pd.DataFrame(preds_, columns=['class_' + str(s) for s in clfs_[0].classes_])
preds_df_['object_id'] = full_test['object_id']
preds_df_['class_99'] = 0.14 * preds_99 / np.mean(preds_99)
print(preds_df_['class_99'].mean())
del agg_, full_test, preds_
gc.collect()
return preds_df_
def save_importances(importances_):
mean_gain = importances_[['gain', 'feature']].groupby('feature').mean()
importances_['mean_gain'] = importances_['feature'].map(mean_gain['gain'])
plt.figure(figsize=(8, 12))
sns.barplot(x='gain', y='feature', data=importances_.sort_values('mean_gain', ascending=False))
plt.tight_layout()
plt.savefig('importances.png')
def train_classifiers(full_train=None, y=None):
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=123)
clfs = []
importances = pd.DataFrame()
lgb_params = {
'boosting_type': 'gbdt',
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.03,
'subsample': .9,
'colsample_bytree': .6,
'reg_alpha': .01,
'reg_lambda': .01,
'min_split_gain': 0.02,
'min_child_weight': 5,
'n_estimators': 10000,
'silent': -1,
'verbose': -1,
'max_depth': 3,
'seed': 159
}
oof_preds = np.zeros((len(full_train), np.unique(y).shape[0]))
full_ids = np.zeros(len(full_train))
w = y.value_counts()
ori_weights = {i : np.sum(w) / w[i] for i in w.index}
weights = {i : np.sum(w) / w[i] for i in w.index}
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}
for value in classes:
weights[value] = weights[value] * class_weight[value]
for fold_, (trn_, val_) in enumerate(folds.split(y, y)):
lgb_params['seed'] += fold_
trn_x, trn_y = full_train.iloc[trn_], y.iloc[trn_]
val_x, val_y = full_train.iloc[val_], y.iloc[val_]
full_ids[val_] = val_x['object_id']
del val_x['object_id'], trn_x['object_id']
# trn_xa, trn_y, val_xa, val_y=smoteAdataset(trn_x.values, trn_y.values, val_x.values, val_y.values)
# trn_x=pd.DataFrame(data=trn_xa, columns=trn_x.columns)
# val_x=pd.DataFrame(data=val_xa, columns=val_x.columns)
clf = lgb.LGBMClassifier(**lgb_params)
clf.fit(
trn_x, trn_y,
eval_set=[(trn_x, trn_y), (val_x, val_y)],
eval_metric=lgb_multi_weighted_logloss,
verbose=100,
early_stopping_rounds=50,
sample_weight=trn_y.map(weights)
)
oof_preds[val_, :] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)
get_logger().info(multi_weighted_logloss(val_y, clf.predict_proba(val_x, num_iteration=clf.best_iteration_)))
imp_df = pd.DataFrame()
imp_df['feature'] = trn_x.columns
imp_df['gain'] = clf.feature_importances_
imp_df['fold'] = fold_ + 1
importances = pd.concat([importances, imp_df], axis=0, sort=False)
clfs.append(clf)
get_logger().info('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_true=y, y_preds=oof_preds))
preds_df_ = pd.DataFrame(oof_preds, columns=['class_' + str(s) for s in clfs[0].classes_])
preds_df_['object_id'] = full_ids
print(preds_df_.head())
preds_df_.to_csv("oof_predictions.csv", index=False)
unique_y = np.unique(y)
class_map = dict()
for i,val in enumerate(unique_y):
class_map[val] = i
y_map = np.zeros((y.shape[0],))
y_map = np.array([class_map[val] for val in y])
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
cnf_matrix = confusion_matrix(y_map, np.argmax(oof_preds,axis=-1))
np.set_printoptions(precision=2)
sample_sub = pd.read_csv('../input/sample_submission.csv')
class_names = list(sample_sub.columns[1:-1])
    del sample_sub
    gc.collect()
# Plot non-normalized confusion matrix
plt.figure(figsize=(12,12))
foo = plot_confusion_matrix(cnf_matrix, classes=class_names,normalize=True,
title='Confusion matrix')
return clfs, importances
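# Illustrative usage sketch (this helper is an addition and is not called anywhere in the
# original script): it shows how the training and importance-plotting helpers above fit
# together; 'full_train' and 'y' are assumed to be the engineered feature table and labels.
def _sketch_training_entry_point(full_train, y):
    clfs, importances = train_classifiers(full_train=full_train, y=y)
    save_importances(importances_=importances)
    return clfs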
def get_aggregations():
return {
'flux': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'flux_err': ['min', 'max', 'mean', 'median', 'std', 'skew'],
'detected': ['sum'],
'flux_ratio_sq': ['sum','skew'],
'flux_by_flux_ratio_sq': ['sum','skew'],
}
def get_new_columns(aggs):
return [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
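# Small sketch of what the two helpers above produce together (illustrative only):
# get_new_columns(get_aggregations()) flattens the aggregation dict into column names such as
# 'flux_min', 'flux_max', ..., 'detected_sum', 'flux_ratio_sq_sum', 'flux_by_flux_ratio_sq_skew'.
def _sketch_agg_column_names():
    return get_new_columns(get_aggregations())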
def add_features_to_agg(df):
df['flux_diff'] = df['flux_max'] - df['flux_min']
df['flux_dif2'] = (df['flux_max'] - df['flux_min']) / df['flux_mean']
df['flux_w_mean'] = df['flux_by_flux_ratio_sq_sum'] / df['flux_ratio_sq_sum']
df['flux_dif3'] = (df['flux_max'] - df['flux_min']) / df['flux_w_mean']
return df
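# Note (an interpretation inferred from the column names, not stated in this script):
# 'flux_w_mean' is a weighted mean flux that uses 'flux_ratio_sq' (a squared flux-to-error
# ratio computed upstream) as the per-observation weight.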
def agg_per_obj_passband(df, col, agg):
aux = df[['object_id','passband']+[col]]
aggs = {col: [agg]}
    aux = aux.groupby(['object_id','passband']).agg(aggs).reset_index()
new_df = pd.DataFrame()
new_df['object_id'] = aux['object_id'].unique()
for x in range(0,6):
new_aux = aux[aux['passband'] == x]
del new_aux['passband']
new_aux.columns = ['object_id',col+'_'+agg+'_passband_'+str(x)]
new_df = pd.merge(new_df, new_aux, on='object_id', how='left')
new_df = new_df.fillna(0)
return new_df
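# Equivalent reshaping sketch (an assumption, not used by the original code): the per-passband
# loop above can also be written as a single groupby + unstack, yielding the same wide table of
# '<col>_<agg>_passband_<p>' columns with missing passbands filled with 0.
def _sketch_agg_per_obj_passband(df, col, agg):
    wide = df.groupby(['object_id', 'passband'])[col].agg(agg).unstack('passband').fillna(0)
    wide.columns = [col + '_' + agg + '_passband_' + str(p) for p in wide.columns]
    return wide.reset_index()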
def mjd_diff_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df[col+'_diff_detected'] = df[col+'_max'] - df[col+'_min']
aux_df = df.groupby('object_id')[col+'_diff_detected'].max().reset_index()
return aux_df
def mjd_diff2_detected(df, col):
mjd_max = df.groupby('object_id')[col].max().reset_index()
mjd_min = df.groupby('object_id')[col].min().reset_index()
mjd_mean = df.groupby('object_id')[col].mean().reset_index()
mjd_max.columns = ['object_id',col+'_max']
mjd_min.columns = ['object_id',col+'_min']
mjd_mean.columns = ['object_id',col+'_mean']
df = pd.merge(df, mjd_max, on='object_id', how='left')
df = pd.merge(df, mjd_min, on='object_id', how='left')
df = pd.merge(df, mjd_mean, on='object_id', how='left')
df[col+'_diff2_detected'] = (df[col+'_max'] - df[col+'_min']) / df[col+'_mean']
aux_df = df.groupby('object_id')[col+'_diff2_detected'].max().reset_index()
return aux_df
def mjd_diff_detected_passband(df, col):
mjd_max = df.groupby(['object_id','passband'])[col].max().reset_index()
mjd_min = df.groupby(['object_id','passband'])[col].min().reset_index()
mjd_max.columns = ['object_id','passband',col+'_max']
mjd_min.columns = ['object_id','passband',col+'_min']
df = pd.merge(df, mjd_max, on=['object_id','passband'], how='left')
df = | pd.merge(df, mjd_min, on=['object_id','passband'], how='left') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 14:34:52 2021
@author: josea
"""
# %% Imports
import os
import sys
sys.path.append(os.path.abspath(".."))
import utils
import torch
import pandas as pd
import numpy as np
from argparse import Namespace
from collections import defaultdict
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
# %% Set-up parameters
args = Namespace(
# Path and data information
csv='../data/',
model_save_file='architectures/',
datafiles=['ESP-ENG.csv'],
# Simulation parameters
modelfiles=['ESEN'],
probs=[100],
n_runs=5, # How many versions of the models to train
# Model hyperparameters
embedding_dim=32,
hidden_dims=[32, 64, 128, 256, 512],
n_rnn_layers=1,
drop_p=0.4,
# Training hyperparameters
n_epochs=50,
learning_rate=0.001,
batch_size=128, # Selected based on train-val-test sizes
# Meta parameters
plotting=False,
print_freq=10,
device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
seed=404
)
utils.set_all_seeds(args.seed, args.device)
# %% Helper
def cosine_distance(dist1, dist2):
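    # Note: despite its name, this returns the cosine *similarity* (1.0 for identical directions), not a distance.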
return dist1.dot(dist2) / (np.linalg.norm(dist1) * np.linalg.norm(dist2))
def kl_divergence(dist1, dist2):
pos = (dist1 != 0.) & (dist2 != 0.)
return np.sum(dist1[pos] * (np.log2(dist1[pos]) - np.log2(dist2[pos])))
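# Minimal usage sketch (illustrative only, not used elsewhere in this script): both metrics
# expect two probability vectors of the same length.
def _sketch_distribution_metrics():
    p, q = np.array([0.5, 0.5]), np.array([0.9, 0.1])
    return kl_divergence(p, q), cosine_distance(p, q)  # roughly 0.74 bits and 0.78 similarity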
# %%
metrics = {'KL': kl_divergence, 'cosine': cosine_distance}
res_cloud = defaultdict(list)
for data, category in zip(args.datafiles, args.modelfiles):
for prob in args.probs:
end = f"{prob:02}-{100-prob:02}"
df = pd.read_csv(args.csv + data)
vectorizer = utils.Vectorizer.from_df(df)
mask_index = vectorizer.data_vocab.PAD_idx
train_words = list(df[(df.label == 'ESP') &
(df.split == 'train')].data)
test_words = list(df[(df.label == 'ESP') &
(df.split == 'val') |
(df.split == 'test')].data)
train_trie = utils.Trie()
train_trie.insert_many(train_words)
test_trie = utils.Trie()
test_trie.insert_many(test_words)
m_name = f"{category}_{end}"
try:
ngram_results = pd.read_csv(f'ngram_results_{m_name}.csv')
print(f"N-gram results found for {m_name}! Loading from file.")
except:
print(f'Computing n-gram results for {m_name}. This will take a while.')
res = defaultdict(list)
for run in range(args.n_runs):
for n in range(2, 6):
print(f'{n}-gram_{run}')
ngram = utils.CharNGram(data=train_words, n=n,
laplace=(run+1)*0.2)
train_res = utils.eval_distributions(ngram, train_trie,
vectorizer, metrics)
test_res = utils.eval_distributions(ngram, test_trie,
vectorizer, metrics)
res['model'].append(f'{n}-gram')
res['param'].append((run+1)*0.2)
res['run'].append(run)
for met, v in train_res.items():
res[f'train_{met}'].append(v)
for met, v in test_res.items():
res[f'test_{met}'].append(v)
del ngram
ngram_results = pd.DataFrame(res)
ngram_results.to_csv(f'ngram_results_{m_name}.csv', index=False, encoding='utf-8')
try:
cloud_res = pd.read_csv(f'cloud_results_{m_name}.csv')
print(f"CLOUD results found for {m_name}! Loading from file.")
except:
print(f'Computing CLOUD results for {m_name}. This will take a while.')
for hidd in args.hidden_dims:
for run in range(args.n_runs):
print(f"{m_name}_{hidd}_{run}")
cloud = torch.load(args.model_save_file +
f"{m_name}_{hidd}/{m_name}_{hidd}_{run}.pt")
cloud.to('cpu')
cloud.eval()
print('train')
train_res = utils.eval_distributions(cloud, train_trie,
vectorizer, metrics)
print('test')
test_res = utils.eval_distributions(cloud, test_trie,
vectorizer, metrics)
res_cloud['model'].append(f'CLOUD_{hidd}')
res_cloud['param'].append(hidd)
res_cloud['run'].append(run)
for met, v in train_res.items():
res_cloud[f'train_{met}'].append(v)
for met, v in test_res.items():
res_cloud[f'test_{met}'].append(v)
cloud_res = pd.DataFrame(res_cloud)
cloud_res.to_csv(f'cloud_results_{m_name}.csv', index=False,
encoding='utf-8')
results = pd.concat([ngram_results, cloud_res], axis=0)
results.to_csv('backup_compare_architectures.csv', index=False, encoding='utf-8')
# %%
sns.set(style='whitegrid', context='paper', palette='colorblind', font_scale=1.5)
results = | pd.read_csv('backup_compare_architectures.csv') | pandas.read_csv |
#!/usr/bin/env python3
"""Generate non-canonical nucleotide probability predictions using signal align output
"""
import os
import itertools
import numpy as np
import pandas as pd
from py3helpers.utils import list_dir, merge_lists
from py3helpers.multiprocess import *
from signalalign.nanoporeRead import NanoporeRead
from signalalign.signalAlignment import SignalAlignment
from signalalign.train.trainModels import read_in_alignment_file
from signalalign.utils.sequenceTools import CustomAmbiguityPositions, AMBIG_BASES
class MarginalizeVariants(object):
def __init__(self, variant_data, variants, read_name):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variants: bases to track probabilities
:param variant_data: variant data
"""
self.read_name = read_name
self.variant_data = variant_data
self.variants = sorted(variants)
self.columns = merge_lists([['read_name', 'contig', 'position', 'strand', 'forward_mapped'],
list(self.variants)])
self.contig = NanoporeRead.bytes_to_string(self.variant_data["contig"][0])
self.position_probs = pd.DataFrame()
self.has_data = False
self.per_read_calls = pd.DataFrame()
self.per_read_columns = merge_lists([['read_name', 'contig', 'strand', "forward_mapped",
"n_sites"], list(self.variants)])
def get_data(self):
"""Calculate the normalized probability of variant for each nucleotide and across the read"""
# final location of per position data and per read data
data = []
per_read_data = []
for read_strand in (b"t", b"c"):
read_strand_specifc_data = self.variant_data[self.variant_data["strand"] == read_strand]
read_strand = read_strand.decode("utf-8")
if len(read_strand_specifc_data) == 0:
continue
for forward_mapped in set(self.variant_data["forward_mapped"]):
mapping_strand = "-"
if forward_mapped == b"forward":
mapping_strand = "+"
strand_specifc_data = read_strand_specifc_data[read_strand_specifc_data["forward_mapped"] ==
forward_mapped]
if len(strand_specifc_data) == 0:
continue
# get positions on strand
positions = set(strand_specifc_data["reference_position"])
n_positions = len(positions)
strand_read_nuc_data = [0] * len(self.variants)
# marginalize probabilities for each position
for pos in positions:
pos_data = strand_specifc_data[strand_specifc_data["reference_position"] == pos]
total_prob = 0
position_nuc_dict = {x: 0.0 for x in self.variants}
# Get total probability for each nucleotide
for nuc in set(pos_data["base"]):
nuc_data = pos_data[pos_data["base"] == nuc]
nuc_prob = sum(nuc_data["posterior_probability"])
total_prob += nuc_prob
position_nuc_dict[NanoporeRead.bytes_to_string(nuc)] = nuc_prob
# normalize probabilities over each position
nuc_data = [0] * len(self.variants)
for nuc in position_nuc_dict.keys():
index = self.variants.index(nuc)
nuc_data[index] = position_nuc_dict[nuc] / total_prob
strand_read_nuc_data[index] += nuc_data[index]
data.append(merge_lists([[self.read_name, self.contig, pos, read_strand, mapping_strand],
nuc_data]))
if n_positions > 0:
per_read_data.append(merge_lists([[self.read_name, self.contig, read_strand, mapping_strand,
n_positions],
[prob / n_positions for prob in strand_read_nuc_data]]))
self.position_probs = pd.DataFrame(data, columns=self.columns)
self.per_read_calls = pd.DataFrame(per_read_data, columns=self.per_read_columns)
self.has_data = True
return self.position_probs
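# Minimal sketch of the per-position normalisation performed in get_data() above (illustrative
# only, not used by the classes in this module): summed posterior probabilities per base are
# divided by their total so each reference position yields a probability distribution over the
# tracked variants.
def _sketch_normalize_position(summed_probs):
    total = sum(summed_probs.values())
    return {base: prob / total for base, prob in summed_probs.items()}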
class MarginalizeFullVariants(object):
def __init__(self, full_data, variants, read_name, forward_mapped):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variants: bases to track probabilities
:param full_data: path to full tsv file
['contig', 'reference_index',
'reference_kmer', 'read_file',
'strand', 'event_index',
'event_mean', 'event_noise',
'event_duration', 'aligned_kmer',
'scaled_mean_current', 'scaled_noise',
'posterior_probability', 'descaled_event_mean',
'ont_model_mean', 'path_kmer']
"""
self.read_name = read_name
self.full_data = full_data
self.variants = sorted(variants)
self.ambig_char = AMBIG_BASES["".join(self.variants)]
self.variant_data = self.full_data[[self.ambig_char in kmer or "X" in kmer for kmer in self.full_data["reference_kmer"]]]
self.forward_mapped = forward_mapped
self.columns = merge_lists([['read_name', 'contig', 'position', 'strand', 'forward_mapped'],
list(self.variants)])
self.contig = NanoporeRead.bytes_to_string(self.full_data["contig"][0])
self.position_probs = pd.DataFrame()
self.has_data = False
self.per_read_calls = pd.DataFrame()
self.per_read_columns = merge_lists([['read_name', 'contig', 'strand', "forward_mapped", "n_sites"],
list(self.variants)])
def get_data(self):
"""Calculate the normalized probability of variant for each nucleotide and across the read"""
# final location of per position data and per read data
data = []
per_read_data = []
if self.forward_mapped:
mapping_strands = ["+", "-"]
else:
mapping_strands = ["-", "+"]
if len(self.variant_data) > 0:
kmer_len_1 = len(self.variant_data["reference_kmer"].iloc[0]) - 1
mapping_index = 0
for read_strand in ("t", "c"):
read_strand_specifc_data = self.variant_data[self.variant_data["strand"] == read_strand]
# read_strand = read_strand.decode("utf-8")
if len(read_strand_specifc_data) == 0:
continue
# get positions on strand
positions = sorted(set(read_strand_specifc_data["reference_index"]))
if mapping_strands[mapping_index] == "-":
positions = positions[::-1]
strand_read_nuc_data = [0] * len(self.variants)
# marginalize probabilities for each position
n_positions = 0
for pos in positions:
pos_data = read_strand_specifc_data[read_strand_specifc_data["reference_index"] == pos]
base = pos_data["aligned_kmer"].iloc[0][kmer_len_1]
if base != self.ambig_char and base != "X":
continue
n_positions += 1
total_prob = 0
position_nuc_dict = {x: 0.0 for x in self.variants}
# Get total probability for each nucleotide
for nuc in self.variants:
# kmer_len_1 = pos_data["reference_kmer"].iloc[0].find("X")
# print(pos_data["reference_kmer"].iloc[0])
nuc_data = pos_data[[nuc == kmer[kmer_len_1] for kmer in pos_data["path_kmer"]]]
nuc_prob = sum(nuc_data["posterior_probability"])
total_prob += nuc_prob
position_nuc_dict[NanoporeRead.bytes_to_string(nuc)] = nuc_prob
# normalize probabilities over each position
nuc_data = [0] * len(self.variants)
for index, nuc in enumerate(self.variants):
assert total_prob > 0, "Check 'variants' parameter. There seems to be no kmers with those " \
"variant characters"
nuc_data[index] = position_nuc_dict[nuc] / total_prob
strand_read_nuc_data[index] += nuc_data[index]
data.append(merge_lists([[self.read_name, self.contig, pos, read_strand,
mapping_strands[mapping_index]], nuc_data]))
if n_positions > 0:
per_read_data.append(merge_lists([[self.read_name, self.contig, read_strand,
mapping_strands[mapping_index], n_positions],
[prob / n_positions for prob in strand_read_nuc_data]]))
mapping_index += 1
self.position_probs = pd.DataFrame(data, columns=self.columns)
self.per_read_calls = pd.DataFrame(per_read_data, columns=self.per_read_columns)
self.has_data = True
else:
self.has_data = False
return self.position_probs
class AggregateOverReads(object):
def __init__(self, variant_tsv_dir, variants="ATGC", verbose=False):
"""Marginalize over all posterior probabilities to give a per position read probability
:param variant_tsv_dir: directory of variantCaller output from signalAlign
:param variants: bases to track probabilities
"""
self.variant_tsv_dir = variant_tsv_dir
self.variants = sorted(variants)
self.columns = merge_lists([['contig', 'position', 'strand', 'forward_mapped'], list(self.variants)])
self.variant_tsvs = list_dir(self.variant_tsv_dir, ext=".vc.tsv")
self.aggregate_position_probs = | pd.DataFrame() | pandas.DataFrame |
import os
from urllib.request import urlretrieve
import pandas as pd
FREEMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_freemont_data(filename='freemont.csv', url=FREEMONT_URL, force_download=False):
""" Download and cache data
Parameters
==========
filename : string (optional)
location to save the data, and name of the file
url : string (optional)
web address of the data
force_download : bool (optional)
if True, force redownload the data
Returns
=======
data : pandas.DataFrame
the freemont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
data = | pd.read_csv(filename, index_col='Date') | pandas.read_csv |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import re
import numpy as np
import pandas as pd
from .data import _get_connection
from .plotting import _init_plot, _draw_plot
from .compound import Compound
from .element import Element
class Stack(object):
"""Foil pack for stacked target calculations
Computes the energy loss and (relative) charged particle flux through a stack
of foils using the Anderson-Ziegler formulation for stopping powers.
Parameters
----------
stack : list of dicts, pd.DataFrame or str
Definition of the foils in the stack. The 'compound' for each foil in
the stack must be given, and the 'areal_density' or some combination of parameters
that allow the areal density to be calculated must also be given. Foils must
also be given a 'name' if they are to be filtered by the .saveas(), .summarize(),
and .plot() methods. By default, foils without 'name' are not included by these
methods.
There are three acceptable formats for `stack`. The first is a pd.DataFrame
with the columns described. The second is a list of dicts, where each dict contains
the appropriate keys. The last is a str, which is a path to a file in either .csv,
.json or .db format, where the headers of the file contain the correct information.
Note that the .json file must follow the 'records' format (see pandas docs). If a .db
file, it must have a table named 'stack'.
The 'areal_density' can be given directly, in units of mg/cm^2, or will be calculated
from the following: 'mass' (in g) and 'area' (in cm^2), 'thickness' (in mm) and 'density'
(in g/cm^3), or just 'thickness' if the compound is a natural element, or
is in `ci.COMPOUND_LIST` or the 'compounds' argument.
Also, the following shorthand indices are supported: 'cm' for 'compound', 'd' for
'density', 't' for 'thickness', 'm' for 'mass', 'a' for 'area', 'ad' for 'areal_density',
and 'nm' for 'name'.
particle : str
Incident ion. For light ions, options are 'p' (default), 'd', 't', 'a' for proton,
deuteron, triton and alpha, respectively. Additionally, heavy ions can be
specified either by element or isotope, e.g. 'Fe', '40CA', 'U', 'Bi-209'.For
light ions, the charge state is assumed to be fully stripped. For heavy ions
the charge state is handled by a Bohr/Northcliffe parameterization consistent
with the Anderson-Ziegler formalism.
E0 : float
Incident particle energy, in MeV. If dE0 is not provided, it will
default to 1 percent of E0.
Other Parameters
----------------
compounds : str, pandas.DataFrame, list or dict
Compound definitions for the compounds included in the foil stack. If the compounds
are not natural elements, or `ci.COMPOUND_LIST`, or if different weights or densities
are required, they can be specified here. (Note specifying specific densities in the
'stack' argument is probably more appropriate.) Also, if the 'compound' name in the
stack is a chemical formula, e.g. 'H2O', 'SrCO3', the weights can be inferred and
'compounds' doesn't need to be given.
If compounds is a pandas DataFrame, it must have the columns 'compound', 'element', one of
'weight', 'atom_weight', or 'mass_weight', and optionally 'density'. If a str, it must be
a path to a .csv, .json or .db file, where .json files must be in the 'records' format and
.db files must have a 'compounds' table. All must have the above information. For .csv
files, the compound only needs to be given for the first line of that compound definition.
If compounds is a list, it must be a list of ci.Element or ci.Compound classes. If it is a
dict, it must have the compound names as keys, and weights as values, e.g.
{'Water':{'H':2, 'O':1}, 'Brass':{'Cu':-66,'Zn':-33}}
dE0 : float
1-sigma width of the energy distribution from which the initial
        particle energies are sampled, in MeV. Default is 1 percent of E0.
N : int
Number of particles to simulate. Default is 10000.
dp : float
Density multiplier. dp is uniformly multiplied to all areal densities in the stack. Default 1.0.
chunk_size : int
If N is large, split the stack calculation in to multiple "chunks" of size `chunk_size`. Default 1E7.
accuracy : float
Maximum allowed (absolute) error in the predictor-corrector method. Default 0.01. If error is
above `accuracy`, each foil in the stack will be solved with multiple steps, between `min_steps`
and `max_steps`.
min_steps : int
The minimum number of steps per foil, in the predictor-corrector solver. Default 2.
max_steps : int
The maximum number of steps per foil, in the predictor-corrector solver. Default 50.
Attributes
----------
stack : pandas.DataFrame
'name', 'compound', 'areal_density', mean energy 'mu_E', and 1-sigma energy width 'sig_E'
for each foil in the stack (energies in MeV).
fluxes : pandas.DataFrame
'flux' as a function of 'energy' for each foil in the stack where 'name' is not None.
compounds : dict
Dictionary with compound names as keys, and ci.Compound classes as values.
Examples
--------
>>> stack = [{'cm':'H2O', 'ad':800.0, 'name':'water'},
{'cm':'RbCl', 'density':3.0, 't':0.03, 'name':'salt'},
{'cm':'Kapton', 't':0.025},
{'cm':'Brass', 'm':3.5, 'a':1.0, 'name':'metal'}]
>>> st = ci.Stack(stack, compounds='example_compounds.json')
>>> st = ci.Stack(stack, compounds={'Brass':{'Cu':-66, 'Zn':-33}}, E0=60.0)
>>> print(st.stack)
name compound areal_density mu_E sig_E
0 water H2O 800.00 55.444815 2.935233
1 salt RbCl 9.00 50.668313 0.683532
2 NaN Kapton 3.55 50.612543 0.683325
3 metal Brass 350.00 49.159245 1.205481
>>> st.saveas('stack_calc.csv')
"""
def __init__(self, stack, particle='p', E0=60.0, **kwargs):
self._E0, self._particle = float(E0), particle
self.compounds = {}
self._parse_kwargs(**kwargs)
if type(stack)==str:
if stack.endswith('.json'):
df = pd.read_json(stack, orient='records')
elif stack.endswith('.csv'):
df = pd.read_csv(stack, header=0)
elif stack.endswith('.db'):
df = pd.read_sql('SELECT * FROM stack', _get_connection(stack))
elif type(stack)==list:
df = pd.DataFrame(stack)
elif type(stack)==pd.DataFrame:
df = stack
df = self._filter_cols(df)
for cm in df['compound']:
if cm not in self.compounds:
self.compounds[cm] = Compound(cm)
def _ad(s):
if not np.isnan(s['areal_density']):
return s['areal_density']
if not np.isnan(s['mass']) and not np.isnan(s['area']):
return 1E3*s['mass']/s['area']
if not np.isnan(s['thickness']):
if not np.isnan(s['density']):
return 1E2*s['density']*s['thickness']
else:
return 1E2*self.compounds[s['compound']].density*s['thickness']
ad = df.apply(_ad, axis=1)
self.stack = pd.DataFrame({'name':df['name'], 'compound':df['compound'], 'areal_density':ad})[['name', 'compound', 'areal_density']]
self._solve()
def _filter_cols(self, df):
cols = []
for cl in df.columns:
c = cl.lower()
if c=='cm':
cols.append('compound')
elif c=='d':
cols.append('density')
elif c=='t':
cols.append('thickness')
elif c=='m':
cols.append('mass')
elif c=='a':
cols.append('area')
elif c=='ad':
cols.append('areal_density')
elif c=='nm':
cols.append('name')
else:
cols.append(c)
df.columns = cols
for c in ['name','density','thickness','mass','area','areal_density']:
if c not in df.columns:
df[c] = np.nan
return df
def _parse_kwargs(self, **kwargs):
self._dE0 = float(kwargs['dE0']) if 'dE0' in kwargs else 0.01*self._E0
self._N = int(kwargs['N']) if 'N' in kwargs else 10000
self._dp = float(kwargs['dp']) if 'dp' in kwargs else 1.0
self._chunk_size = int(kwargs['chunk_size']) if 'chunk_size' in kwargs else int(1E7)
self._accuracy = float(kwargs['accuracy']) if 'accuracy' in kwargs else 0.01
self._min_steps = int(kwargs['min_steps']) if 'min_steps' in kwargs else 2
self._max_steps = int(kwargs['max_steps']) if 'max_steps' in kwargs else 50
if 'compounds' in kwargs:
compounds = kwargs['compounds']
if type(compounds)==str:
if compounds.endswith('.json'):
df = pd.read_json(compounds, orient='records').fillna(method='ffill')
df.columns = map(str.lower, map(str, df.columns))
cms = [str(i) for i in pd.unique(df['compound'])]
self.compounds = {cm:Compound(cm, weights=df[df['compound']==cm]) for cm in cms}
elif compounds.endswith('.csv'):
df = pd.read_csv(compounds, header=0).fillna(method='ffill')
df.columns = map(str.lower, map(str, df.columns))
cms = [str(i) for i in pd.unique(df['compound'])]
self.compounds = {cm:Compound(cm, weights=df[df['compound']==cm]) for cm in cms}
elif compounds.endswith('.db'):
df = pd.read_sql('SELECT * FROM compounds', _get_connection(compounds))
df.columns = map(str.lower, map(str, df.columns))
cms = [str(i) for i in | pd.unique(df['compound']) | pandas.unique |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 21:53:10 2018
Adapted from selectSubjID_inScale_V2.
Screens the items (columns) of the master spreadsheet and the subjects' folders
according to the given conditions.
inputs:
    file_all: the master spreadsheet
    column_basic1=[0,11,19,20,21,22,23,27,28,29,30]: columns holding basic information
    column_basic2=['学历(年)','中国人利手量表']: names of the basic-information columns
    column_hamd17=np.arange(104,126,1),
    column_hama=np.arange(126,141,1),
    column_yars=np.arange(141,153,1),
    column_bprs=np.arange(153,177,1)
    column_diagnosis='诊断': column name of the diagnosis
    column_quality='Resting_quality'
    column_note1='诊断备注'
    column_note2='备注'
    note1_keyword='复扫': keyword in the notes that marks repeat scans
outputs:
    folder: the selected subject IDs
    basic: the selected basic information
    hamd17, hama, yars, bprs: the selected scales
    logicIndex_scale: logical index of the scales
    logicIndex_repeat: logical index of the repeat scans
    ...
    to fetch other output, please check results_dict
@author: <NAME>
new feature: screening rows by arbitrary conditions
"""
# ===============================================
import sys
# sys.path.append(r'D:\myCodes\MVPA_LIChao\MVPA_Python\workstation')
import pandas as pd
import re
import os
import numpy as np
class select_SubjID():
# initial parameters
def __init__(self,
file_all=r"D:\WorkStation_2018\WorkStation_2018_08_Doctor_DynamicFC_Psychosis\Scales\8.30大表.xlsx",
                 # Basic information and scale columns (row-level screening conditions are not yet supported for these)
column_basic1=[0, 11, 19, 20, 21, 22, 23, 27, 28, 29, 30],
column_basic2=['学历(年)', '中国人利手量表', '诊断备注', '备注'],
column_hamd17=np.arange(104, 126, 1),
column_hama=np.arange(126, 141, 1),
column_yars=np.arange(141, 153, 1),
column_bprs=np.arange(153, 177, 1),
                 # Columns (items) that can be screened row-wise, given as a dict: keys are column names, values are conditions
                 # condition_name:{condition:[include_or_exclude,match_method]}
                 # Note: for all conditions on a given column, only one screening mode is supported at a time,
                 # i.e. either all 'include' or all 'exclude'
                 # In practice, include and exclude should generally not be applied to the same column anyway
screening_dict={
'诊断': {1: ['include', 'exact'], 2: ['include', 'exact'], 3: ['include', 'exact'], 4: ['include', 'exact']},
'Resting_quality': {'Y': ['include', 'exact']},
'诊断备注': {'复扫': ['exclude', 'fuzzy'], '糖尿病': ['exclude', 'fuzzy'], '不能入组': ['exclude', 'fuzzy']},
'备注': {'复扫': ['exclude', 'fuzzy']}
}
# screening_dict={
# '诊断':{1:['include','exact'],2:['include','exact'],3:['include','exact'],4:['include','exact']},
# 'Resting_quality':{'Y':['include','exact']},
# '诊断备注':{'复扫':['exclude','fuzzy']}
# }
):
# ====================================================
self.file_all = file_all
self.column_basic1 = column_basic1
self.column_basic2 = column_basic2
self.column_hamd17 = column_hamd17
self.column_hama = column_hama
self.column_yars = column_yars
self.column_bprs = column_bprs
self.screening_dict = screening_dict
print('Initialized!\n')
# ====================================================
def loadExcel(self):
# load all clinical data in excel
self.allClinicalData = pd.read_excel(self.file_all)
return self
def extract_one_series(self, column_var):
        # Select items; the item columns can be given either as integer positions or as column-name strings
if isinstance(column_var[0], str):
data = self.allClinicalData.loc[:, column_var]
elif isinstance(self.column_basic1[0], np.int32):
data = self.allClinicalData.iloc[:, column_var]
elif isinstance(self.column_basic1[0], int):
data = self.allClinicalData.iloc[:, column_var]
else:
            print('Invalid input for basicIndex!\n')
return data
# ====================================================
def select_item(self):
        # Select items; item columns can be integer positions or column-name strings (note: row-level screening is not applied to these items yet)
basic1 = self.extract_one_series(self.column_basic1)
basic2 = self.extract_one_series(self.column_basic2)
self.basic = pd.concat([basic1, basic2], axis=1)
self.hamd17 = self.extract_one_series(self.column_hamd17)
self.hama = self.extract_one_series(self.column_hama)
self.yars = self.extract_one_series(self.column_yars)
self.bprs = self.extract_one_series(self.column_bprs)
return self
# ====================================================
    # Conditional screening
def screen_data_according_conditions_in_dict_one(
self, series_for_screening, condition_in_dict):
        # Screen according to the conditions in the dict and obtain the index. Note that a condition may be a string or a number.
        # Note: this function handles only one column at a time.
        # Since the contains() matcher cannot handle null values, replace nulls with '未知' (unknown) first
series_for_screening = series_for_screening.mask(
series_for_screening.isnull(), '未知')
        # Create an empty pd.DataFrame (to be aligned with the index of series_for_screening) for the subsequent joins
screened_ind_all = | pd.DataFrame([]) | pandas.DataFrame |
# Importing the relevant libraries
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import datetime as dt
# Initial steps
df = pd.read_csv('CRDS_Jaragua_G2301_hour_LT.txt')  # working dataframe
df = df.set_index('DATE_TIME')  # set the 'DATE_TIME' column as the index
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, | pd.Index(rec.dtype.names) | pandas.Index |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
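# For example (illustrative only), _make_func_use_binop1('+') returns a function compiled from:
#     def test_impl(A, B):
#         return A + B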
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
class TestSeries_ops(TestCase):
def test_series_operators_int(self):
"""Verifies using all various Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_int_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on an integer Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
def test_series_operators_float(self):
"""Verifies using all various Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
def test_series_operators_float_scalar(self):
"""Verifies using all various Series arithmetic binary operators
on a float Series with default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
                    pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
    @skip_numba_jit('operator.neg for SeriesType is not implemented yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_operators_comp_numeric(self):
"""Verifies using all various Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operators_comp_numeric_scalar(self):
"""Verifies using all various Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
def test_series_operators_comp_str_scalar(self):
"""Verifies using all various Series comparison binary operators on an string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
                    or np.issubdtype(dtype_left, np.floating) and np.issubdtype(dtype_right, np.floating)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
    @skip_numba_jit("TODO: fix Series.sort_values to handle both None and '' in string series")
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify case when second operator is scalar
scalar = 3.0
with self.subTest(scalar=scalar):
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_add_str_scalar(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [' ', 'wq', '', '23']
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_add_str_unsupported(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
1,
3.0,
pd.Series(np.arange(n)),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator add(). Not supported for not-comparable operands.'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_mul_str_scalar(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', ' ', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [-1, 0, 2, 5]
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series([-1, 2, 0, 5, 3, -5, 4])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_mul_str_align_index_int1(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes containg same unique values (so alignment doesn't produce NaNs) """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
shuffled_data = np.arange(n, dtype=np.int)
np.random.shuffle(shuffled_data)
index_A = shuffled_data
np.random.shuffle(shuffled_data)
index_B = shuffled_data
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
                pd.testing.assert_series_equal(result, result_ref)
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/09/18"
import logging
import os
import json
import sys
import pandas as pd
import numpy as np
import random
import math
import itertools
import scipy.stats
from sklearn import linear_model
from math import exp, sqrt
import ai4materials.utils.unit_conversion as uc
logger = logging.getLogger('ai4materials')
def choose_atomic_features(selected_feature_list=None,
atomic_data_file=None, binary_data_file=None):
"""Choose primary features for the extended lasso procedure."""
df1 = pd.read_csv(atomic_data_file, index_col=False)
df2 = pd.read_csv(binary_data_file, index_col=False)
# merge two dataframes on Material
df = pd.merge(df1, df2, on='Mat')
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
radii_s_p = ['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
e_val_z = ['Es(A)', 'val(A)']
df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Es(B)', 'val(B)']
df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(A)', 'val(A)']
df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(B)', 'val(B)']
df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
column_list = df.columns.tolist()
feature_list = column_list
if 'Mat' in feature_list:
feature_list.remove('Mat')
if 'Edim' in feature_list:
feature_list.remove('Edim')
logger.debug("Available features: \n {}".format(feature_list))
    if not selected_feature_list:
        logger.error("No selected features.")
        sys.exit(1)
    logger.info("Primary features selected: \n {}".format(selected_feature_list))
    df_selected = df[selected_feature_list]
    df_selected.insert(0, 'Mat', df['Mat'])
    return df_selected
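# Usage sketch for choose_atomic_features. The CSV paths below are placeholders
# for the atomic and binary data files expected by this module; the selected
# features listed here are among the columns derived above.
def _example_choose_atomic_features():
    selected = ['r_sigma', 'r_pi',
                'Es(A)/sqrt(Zval(A))', 'Ep(A)/sqrt(Zval(A))',
                'Es(B)/sqrt(Zval(B))', 'Ep(B)/sqrt(Zval(B))']
    return choose_atomic_features(selected_feature_list=selected,
                                  atomic_data_file='atomic_data.csv',
                                  binary_data_file='binary_data.csv')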
def classify_rs_zb(structure):
"""Classify if a structure is rocksalt of zincblend from a list of NoMaD structure.
(one json file). Supports multiple frames (TO DO: check that). Hard-coded.
rocksalt:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.5 0.5 0.5
zincblende:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.25 0.25 0.25
zincblende --> label=0
rocksalt --> label=1
"""
energy = {}
chemical_formula = {}
label = {}
# gIndexRun=0
# gIndexDesc=1
    for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[(gIndexRun, gIndexDesc)]
# energy=1.0
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[(gIndexRun, gIndexDesc)]
# get labels, works only for RS/ZB dataset
pos_atom_2 = np.asarray(list(structure.scaled_positions.values())).reshape(2, 3)[1, :]
if all(i < 0.375 for i in pos_atom_2):
# label='zincblend'
label[gIndexRun, gIndexDesc] = 0
else:
# label='rocksalt'
label[gIndexRun, gIndexDesc] = 1
break
return chemical_formula, energy, label
def get_energy_diff(chemical_formula_list, energy_list, label_list):
""" Obtain difference in energy (eV) between rocksalt and zincblend structures of a given binary.
From a list of chemical formulas, energies and labels returns a dictionary
with {`material`: `delta_e`} where `delta_e` is the difference between the energy
with label 1 and energy with label 0, grouped by material.
Each element of such list corresponds to a json file.
    The `delta_e` is exactly what is reported in PRL 114, 105503 (2015).
.. todo:: Check if it works for multiple frames.
"""
energy_ = []
chemical_formula_ = []
label_ = []
# energy and chemical formula are lists even if only one frame is present
for i, energy_i in enumerate(energy_list):
energy_.append(energy_i.values())
for i, chemical_formula_i in enumerate(chemical_formula_list):
chemical_formula_.append(chemical_formula_i.values())
for i, label_i in enumerate(label_list):
label_.append(label_i.values())
# flatten the lists
energy = list(itertools.chain(*energy_))
chemical_formula = list(itertools.chain(*chemical_formula_))
label = list(itertools.chain(*label_))
df = pd.DataFrame()
df['Mat'] = chemical_formula
df['Energy'] = energy
df['Label'] = label
# generate summary dataframe with lowest zincblend and rocksalt energy
# zincblend --> label=0
# rocksalt --> label=1
df_summary = df.sort_values(by='Energy').groupby(['Mat', 'Label'], as_index=False).first()
groupby_mat = df_summary.groupby('Mat')
dict_delta_e = {}
for mat, df in groupby_mat:
# calculate the delta_e (E_RS - E_ZB)
energy_label_1 = df.loc[df['Label'] == 1].Energy.values
energy_label_0 = df.loc[df['Label'] == 0].Energy.values
# if energy_diff>0 --> rs
# if energy_diff<0 --> zb
if (energy_label_0 and energy_label_1):
# single element numpy array --> convert to scalar
energy_diff = (energy_label_1 - energy_label_0).item(0)
# divide by 2 because it is the energy_diff for each atom
energy_diff = energy_diff / 2.0
else:
logger.error(
"Could not find all the energies needed to calculate required property for material '{0}'".format(mat))
sys.exit(1)
dict_delta_e.update({mat: (energy_diff, energy_label_0, energy_label_1)})
return dict_delta_e
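# Sketch of how the returned dictionary is typically consumed: each key is a
# chemical formula, each value a tuple (delta_e, lowest_E_label0, lowest_E_label1),
# with delta_e in eV/atom as defined above.
def _example_energy_diff_usage(dict_delta_e):
    for mat, (delta_e, e_label0, e_label1) in dict_delta_e.items():
        logger.info("{}: delta_e = {:.4f} eV/atom".format(mat, delta_e))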
def get_lowest_energy_structures(structure, dict_delta_e):
"""Get lowest energy structure for each material and label type.
Works only with two possible labels for a given material.
.. todo:: Check if it works for multiple frames.
"""
energy = {}
chemical_formula = {}
is_lowest_energy = {}
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[gIndexRun, gIndexDesc]
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[gIndexRun, gIndexDesc]
lowest_energy_label_0 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[1]
lowest_energy_label_1 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[2]
if lowest_energy_label_0 > lowest_energy_label_1:
lowest_energy_label_01 = lowest_energy_label_1
else:
lowest_energy_label_01 = lowest_energy_label_0
if energy[gIndexRun, gIndexDesc] == lowest_energy_label_01:
is_lowest_energy[gIndexRun, gIndexDesc] = True
else:
is_lowest_energy[gIndexRun, gIndexDesc] = False
return is_lowest_energy
def write_atomic_features(structure, selected_feature_list, df, dict_delta_e=None,
path=None, filename_suffix='.json', json_file=None):
"""Given the chemical composition, build the descriptor made of atomic features only.
Includes all the frames in the same json file.
.. todo:: Check if it works for multiple frames.
"""
# make dictionary {primary_feature: value} for each structure
# dictionary of a dictionary, key: Mat, value: atomic_features
dict_features = df.set_index('chemical_formula').T.to_dict()
# label=0: rocksalt, label=1: zincblend
#chemical_formula_, energy_, label_ = classify_rs_zb(structure)
#is_lowest_energy_ = get_lowest_energy_structures(structure, dict_delta_e)
if structure.isPeriodic == True:
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
# filename is the normalized absolute path
filename = os.path.abspath(os.path.normpath(os.path.join(path,
'{0}{1}'.format(structure.name, filename_suffix))))
                outF = open(filename, 'w')
outF.write("""
{
"data":[""")
cell = structure.atoms[gIndexRun, gIndexDesc].get_cell()
cell = np.transpose(cell)
atoms = structure.atoms[gIndexRun, gIndexDesc]
chemical_formula = structure.chemical_formula_[gIndexRun, gIndexDesc]
energy = structure.energy_eV[gIndexRun, gIndexDesc]
label = label_[gIndexRun, gIndexDesc]
#target = dict_delta_e.get(chemical_formula_[gIndexRun, gIndexDesc])[0]
target = dict_delta_e.get(chemical_formula)
atomic_features = dict_features[structure.chemical_formula[gIndexRun, gIndexDesc]]
#is_lowest_energy = is_lowest_energy_[gIndexRun,gIndexDesc]
res = {
"checksum": structure.name,
"label": label,
"energy": energy,
#"is_lowest_energy": is_lowest_energy,
"delta_e_rs_zb": target,
"chemical_formula": chemical_formula,
"gIndexRun": gIndexRun,
"gIndexDesc": gIndexDesc,
"cell": cell.tolist(),
"particle_atom_number": map(lambda x: x.number, atoms),
"particle_position": map(lambda x: [x.x, x.y, x.z], atoms),
"atomic_features": atomic_features,
"main_json_file_name": json_file,
}
json.dump(res, outF, indent=2)
outF.write("""
] }""")
outF.flush()
return filename
def r_sigma(row):
    """Calculates r_sigma.
    John-Bloch's indicator1: |rp(A) + rs(A) - rp(B) - rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
    Input rp(A), rs(A), rp(B), rs(B)
    They need to be given in this order.
    """
    return abs(row[0] + row[1] - row[2] - row[3])
def r_pi(row):
    """Calculates r_pi.
    John-Bloch's indicator2: |rp(A) - rs(A)| + |rp(B) - rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
    Input rp(A), rs(A), rp(B), rs(B)
    They need to be given in this order.
    """
    return abs(row[0] - row[1]) + abs(row[2] - row[3])
def e_sqrt_z(row):
"""Calculates e/sqrt(val_Z).
Es/sqrt(Zval) and Ep/sqrt(Zval) from Phys. Rev. B 85, 104104 (2012).
Input Es(A) or Ep(A), val(A) (A-->B)
They need to be given in this order.
"""
return row[0] / math.sqrt(row[1])
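# Worked example for the three helpers above (illustrative radii/energies,
# not values from the reference data set):
def _example_john_bloch_indicators():
    radii = [1.0, 0.8, 1.2, 0.9]          # rp(A), rs(A), rp(B), rs(B)
    sigma = r_sigma(radii)                # |1.0 + 0.8 - 1.2 - 0.9| = 0.3
    pi = r_pi(radii)                      # |1.0 - 0.8| + |1.2 - 0.9| = 0.5
    es_z = e_sqrt_z([-12.0, 4.0])         # -12.0 / sqrt(4) = -6.0
    return sigma, pi, es_z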
def _get_scaling_factors(columns, metadata_info, energy_unit, length_unit):
"""Calculates characteristic energy and length, given an atomic metadata"""
scaling_factor = []
if columns is not None:
for col in columns:
try:
col_unit = metadata_info[col.split('(', 1)[0]]['units']
                # check allowed values, to avoid problems with the substance - NOT IDEAL
if col_unit == 'J':
scaling_factor.append(uc.convert_unit(1, energy_unit, target_unit='eV'))
# divide all column by e_0
#df.loc[:, col] *= e_0
elif col_unit == 'm':
scaling_factor.append(uc.convert_unit(1, length_unit, target_unit='angstrom'))
# divide all column by e_0
#df.loc[:, col] *= d_0
else:
scaling_factor.append(1.0)
logger.debug("Feature units are not energy nor lengths. "
"No scale to characteristic length.")
except BaseException:
scaling_factor.append(1.0)
logger.debug("Feature units not included in metadata")
return scaling_factor
def _my_power_2(row):
return pow(row[0], 2)
def _my_power_3(row):
return pow(row[0], 3)
def _my_power_m1(row):
return pow(row[0], -1)
def _my_power_m2(row):
return pow(row[0], -2)
def _my_power_m3(row):
return pow(row[0], -3)
def _my_abs_sqrt(row):
    return math.sqrt(abs(row[0]))
def _my_exp(row):
return exp(row[0])
def _my_exp_power_2(row):
return exp(pow(row[0], 2))
def _my_exp_power_3(row):
return exp(pow(row[0], 3))
def _my_sum(row):
return row[0] + row[1]
def _my_abs_sum(row):
return abs(row[0] + row[1])
def _my_abs_diff(row):
return abs(row[0] - row[1])
def _my_diff(row):
return row[0] - row[1]
def _my_div(row):
return row[0] / row[1]
def _my_sum_power_2(row):
return pow((row[0] + row[1]), 2)
def _my_sum_power_3(row):
return pow((row[0] + row[1]), 3)
def _my_sum_exp(row):
return exp(row[0] + row[1])
def _my_sum_exp_power_2(row):
return exp(pow(row[0] + row[1], 2))
def _my_sum_exp_power_3(row):
return exp(pow(row[0] + row[1], 3))
def combine_features(df=None, energy_unit=None, length_unit=None,
metadata_info=None, allowed_operations=None, derived_features=None):
"""Generate combination of features given a dataframe and a list of allowed operations.
    For the exponentials, we introduce a characteristic energy/length,
    converting the features to eV and angstrom (see _get_scaling_factors) before exponentiation.
    .. todo:: Fix under/overflow errors, and introduce handling of exceptions.
"""
if allowed_operations:
logger.info('Selected operations:\n {0}'.format(allowed_operations))
else:
logger.warning('No allowed operations selected.')
# make derived features
if derived_features is not None:
if 'r_sigma' in derived_features:
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
logger.info('Including rs and rp to allow r_sigma calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
if 'r_pi' in derived_features:
logger.info('Including rs and rp to allow r_pi calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
# e_val_z = ['Es(A)', 'val(A)']
# df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Es(B)', 'val(B)']
# df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
#
# e_val_z = ['Ep(A)', 'val(A)']
# df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Ep(B)', 'val(B)']
# df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
columns_ = df.columns.tolist()
# define subclasses of features (see Phys. Rev. Lett. 114, 105503(2015) Supp. info. pag.1)
# make a dictionary {feature: subgroup}
# features belonging to a0 will not be combined, just added at the end
# dict_features = {
# u'val(B)': 'a0', u'val(A)': 'a0',
#
# u'period__el0':'a0',
# u'period__el1':'a0',
# u'atomic_number__el0': 'a0',
# u'atomic_number__el1': 'a0',
# u'group__el0': 'a0',
# u'group__el1': 'a0',
#
# u'atomic_ionization_potential__el0': 'a1',
# u'atomic_ionization_potential__el1': 'a1',
# u'atomic_electron_affinity__el0': 'a1',
# u'atomic_electron_affinity__el1': 'a1',
# u'atomic_homo_lumo_diff__el0': 'a1',
# u'atomic_homo_lumo_diff__el1': 'a1',
# u'atomic_electronic_binding_energy_el0': 'a1',
# u'atomic_electronic_binding_energy_el1': 'a1',
#
#
# u'HOMO(A)': 'a2', u'LUMO(A)': 'a2', u'HOMO(B)': 'a2', u'LUMO(B)': 'a2',
# u'HL_gap_AB': 'a2',
# u'Ebinding_AB': 'a2',
#
# u'atomic_rs_max__el0': 'a3',
# u'atomic_rs_max__el1': 'a3',
# u'atomic_rp_max__el0': 'a3',
# u'atomic_rp_max__el1': 'a3',
# u'atomic_rd_max__el0': 'a3',
# u'atomic_rd_max__el1': 'a3',
# u'atomic_r_by_2_dimer__el0': 'a3',
# u'atomic_r_by_2_dimer__el1': 'a3',
#
# u'd_AB': 'a3',
# u'r_sigma': 'a3', u'r_pi': 'a3',
#
# u'Eh': 'a4', u'C': 'a4'
# }
dict_features = {
u'period': 'a0',
u'atomic_number': 'a0',
u'group': 'a0',
u'atomic_ionization_potential': 'a1',
u'atomic_electron_affinity': 'a1',
u'atomic_homo_lumo_diff': 'a1',
u'atomic_electronic_binding_energy': 'a1',
u'atomic_homo': 'a2', u'atomic_lumo': 'a2',
u'atomic_rs_max': 'a3',
u'atomic_rp_max': 'a3',
u'atomic_rd_max': 'a3',
u'atomic_r_by_2_dimer': 'a3',
u'r_sigma': 'a3', u'r_pi': 'a3'
}
# standardize the data -
# we cannot reproduce the PRL if we standardize the data
#df_a0 = (df_a0 - df_a0.mean()) / (df_a0.max() - df_a0.min())
#df_a1 = (df_a1 - df_a1.mean()) / (df_a1.max() - df_a1.min())
#df_a2 = (df_a2 - df_a2.mean()) / (df_a2.max() - df_a2.min())
#df_a3 = (df_a3 - df_a3.mean()) / (df_a3.max() - df_a3.min())
#df_a4 = (df_a4 - df_a4.mean()) / (df_a4.max() - df_a4.min())
# df_a0 = df[[col for col in columns_ if dict_features.get(col)=='a0']].astype('float32')
df_a0 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a0']].astype('float32')
df_a1 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a1']].astype('float32')
df_a2 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a2']].astype('float32')
df_a3 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a3']].astype('float32')
df_a4 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a4']].astype('float32')
col_a0 = df_a0.columns.tolist()
col_a1 = df_a1.columns.tolist()
col_a2 = df_a2.columns.tolist()
col_a3 = df_a3.columns.tolist()
col_a4 = df_a4.columns.tolist()
# this list will at the end all the dataframes created
df_list = []
df_b0_list = []
df_b1_list = []
df_b2_list = []
df_b3_list = []
df_c3_list = []
df_d3_list = []
df_e3_list = []
df_f1_list = []
df_f2_list = []
df_f3_list = []
df_x1_list = []
df_x2_list = []
df_x_list = []
# create b0: absolute differences and sums of a0
# this is not in the PRL.
for subset in itertools.combinations(col_a0, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = ['(' + subset[1] + '-' + subset[0] + ')']
            data = df_a0[list(subset[::-1])].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '/' in allowed_operations:
cols = [subset[0] + '/' + subset[1]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = [subset[1] + '/' + subset[0]]
            data = df_a0[list(subset[::-1])].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a0, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a0[list(subset)].apply(_my_power_2, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a0[list(subset)].apply(_my_power_3, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_exp, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# create b1: absolute differences and sums of a1
for subset in itertools.combinations(col_a1, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
# create b2: absolute differences and sums of a2
for subset in itertools.combinations(col_a2, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
# create b3: absolute differences and sums of a3
for subset in itertools.combinations(col_a3, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
# create c3: two steps:
# 1) squares of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a3[list(subset)].apply(_my_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a3[list(subset)].apply(_my_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# 2) squares of b3 (only sums) --> sum squared of a3
for subset in itertools.combinations(col_a3, 2):
if '^2' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^2']
data = df_a3[list(subset)].apply(_my_sum_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')^3']
data = df_a3[list(subset)].apply(_my_sum_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# create d3: two steps:
# 1) exponentials of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + '+' + subset[1] + ')']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp, axis=1)
df_d3_list.append(pd.DataFrame(data, columns=cols))
# create e3: two steps:
# 1) exponentials of squared a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp(' + subset[0] + '^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp(' + subset[0] + '^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_exp_power_3, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
except OverflowError as e:
logger.warning('Dropping feature combination that caused under/overflow.\n')
# 2) exponentials of b3 (only sums) --> exponential of sum of a3
for subset in itertools.combinations(col_a3, 2):
operations = {'exp', '^2'}
if operations <= set(allowed_operations):
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^2)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_2, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
operations = {'exp', '^3'}
if operations <= set(allowed_operations):
try:
cols = ['exp((' + subset[0] + '+' + subset[1] + ')^3)']
# find scaling factor for e_0 or d_0 for scaling
# and multiply each column by the scaling factor
scaling_factors = _get_scaling_factors(list(subset), metadata_info, energy_unit, length_unit)
df_subset = df_a3[list(subset)] * scaling_factors
data = df_subset.apply(_my_sum_exp_power_3, axis=1)
df_e3_list.append(pd.DataFrame(data, columns=cols))
except OverflowError as e:
logger.warning('Dropping feature combination that caused under/overflow.\n')
# make dataframes from lists, check if they are not empty
# we make there here because they are going to be used to further
# combine the features
if not df_a0.empty:
df_list.append(df_a0)
if not df_a1.empty:
df_x1_list.append(df_a1)
df_list.append(df_a1)
if not df_a2.empty:
df_x1_list.append(df_a2)
df_list.append(df_a2)
if not df_a3.empty:
df_x1_list.append(df_a3)
df_list.append(df_a3)
if not df_a4.empty:
df_list.append(df_a4)
if df_b0_list:
df_b0 = pd.concat(df_b0_list, axis=1)
col_b0 = df_b0.columns.tolist()
df_b0.to_csv('./df_b0.csv', index=True)
df_list.append(df_b0)
if df_b1_list:
df_b1 = pd.concat(df_b1_list, axis=1)
col_b1 = df_b1.columns.tolist()
df_x1_list.append(df_b1)
df_list.append(df_b1)
if df_b2_list:
df_b2 = pd.concat(df_b2_list, axis=1)
col_b2 = df_b2.columns.tolist()
df_x1_list.append(df_b2)
df_list.append(df_b2)
if df_b3_list:
df_b3 = pd.concat(df_b3_list, axis=1)
col_b3 = df_b3.columns.tolist()
df_x1_list.append(df_b3)
df_list.append(df_b3)
if df_c3_list:
df_c3 = pd.concat(df_c3_list, axis=1)
col_c3 = df_c3.columns.tolist()
df_x2_list.append(df_c3)
df_list.append(df_c3)
if df_d3_list:
        df_d3 = pd.concat(df_d3_list, axis=1)
import pandas as pd
import numpy as np
import lmfit
from .concentration import cumulative_lq
def update_cumul_df(df, loads, flow):
cumulative_ratio = cumulative_lq(loads, flow)
df.loc[:, 'cumul_flow_ratio'] = cumulative_ratio['flow_ratio']
df.loc[:, 'cumul_load_ratio'] = cumulative_ratio['loads_ratio']
return df
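# Minimal sketch of update_cumul_df on synthetic series. It assumes, as the
# code above does, that cumulative_lq(loads, flow) returns an object exposing
# 'flow_ratio' and 'loads_ratio'; the values here are illustrative only.
def _example_update_cumul_df():
    idx = pd.date_range('2000-01-01', periods=5, freq='D')
    loads = pd.Series([1.0, 2.0, 4.0, 2.0, 1.0], index=idx)
    flow = pd.Series([10.0, 20.0, 40.0, 20.0, 10.0], index=idx)
    return update_cumul_df(pd.DataFrame(index=idx), loads, flow)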
def load_flow_loc(start_end, load_flow, timestep='d'):
start, end = start_end
if timestep=='h':
        start = pd.to_datetime(start + ' 00:00:00')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
from xgboost import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
# read data sets
train = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\train.csv")
test = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\test.csv")
campaign_data = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\campaign_data.csv")
coupon_item_mapping = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\coupon_item_mapping.csv")
customer_demographics = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\customer_demographics.csv")
customer_transaction_data = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\customer_transaction_data.csv")
item_data = pd.read_csv(r"E:\MyDrive-2\DataScience\av-amexpert\item_data.csv")
customer_transaction_data['sp'] = customer_transaction_data['selling_price']/customer_transaction_data['quantity']
customer_transaction_data = customer_transaction_data.drop_duplicates()
train.shape
mydict = dict(zip(coupon_item_mapping.coupon_id, coupon_item_mapping.item_id))
train = pd.merge(train, campaign_data, on="campaign_id", how="left")
train = pd.merge(train, customer_demographics, on="customer_id", how="left")
train['item_id'] = train['coupon_id'].map(mydict)
train = pd.merge(train, item_data, on="item_id", how="left")
import pandas as pd
import numpy as np
import sklearn as skl
import porch
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn.metrics import pairwise_distances
import pickle
import lifelines
import qvalue
from run_analysis import load_metabric, illumina2ensembl_dictionary, get_reactome_illumina, return_pathway
def return_PC1(pathway_data):
pca = PCA(n_components = 1)
pca.fit(pathway_data.dropna())
return pca.components_[0], pca.explained_variance_ratio_[0]
def check_stability(pathway_data, nsamples = 10, fraction = 0.5):
pcs = []
for i in range(nsamples):
sample = pathway_data.sample(frac=fraction)
PC1, exp_var = return_PC1(sample)
pcs.append(PC1)
distances = pairwise_distances(pcs, metric = 'cosine')
correct_direction = np.vectorize(lambda x: np.min([x, 2-x]))
distances = correct_direction(distances)
return [np.mean(distances), np.std(distances)]
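# Minimal sketch of the PCA stability check on synthetic data (purely
# illustrative; the analysis below runs it per Reactome pathway on METABRIC):
def _example_stability_on_random_data(n_samples=200, n_genes=20):
    fake = pd.DataFrame(np.random.rand(n_samples, n_genes))
    pc1, explained = return_PC1(fake)
    mean_dist, std_dist = check_stability(fake, nsamples=10, fraction=0.5)
    return pc1, explained, mean_dist, std_dist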
if __name__ == "__main__":
## Load data
metabric_path = '../../data/metabric'
illumina2ensembl_path = 'data/illumina2ensembl.txt'
data = load_metabric(metabric_path)
duplicates = ["MB-0025", "MB-0196", "MB-0326", "MB-0329", "MB-0330", "MB-0335", "MB-0355", "MB-0407", "MB-0433", "MB-0547", "MB-2720", "MB-6206"]
data = data.drop(duplicates, axis = 1)
expression_df = data.iloc[8:,:]
metadata_df = data.iloc[:8,:]
illumina_reactome_df = get_reactome_illumina(illumina2ensembl_path)
survival = pickle.load(open('results/metabric_path_survival.p', 'rb'))
survival.index = [x.replace('_','-') for x in survival.index]
genes_cox = pickle.load(open('results/metabric_gene_survival.p', 'rb'))
## Calculate the mean pairwise distances
stability_check_df = pd.DataFrame(columns = ['mean', 'std', 'exp_var', 'ngenes'])
for pathway in np.unique(illumina_reactome_df['reactome_id']):
        print('Stability test ' + pathway)
pathway_data = return_pathway(data, illumina_reactome_df, pathway)
ngenes = pathway_data.shape[1]
pc1, exp_var = return_PC1(pathway_data)
if pathway_data.shape[1] > 0:
try:
stability_check = check_stability(pathway_data, nsamples = 10, fraction = 0.2)
stability_check_df.loc[pathway] = [stability_check[0], stability_check[1], exp_var, ngenes]
except:
continue
stability_check_random_df = pd.DataFrame(columns = ['mean', 'std', 'ngenes'])
for pathway in np.unique(illumina_reactome_df['reactome_id']):
pathway_data = return_pathway(data, illumina_reactome_df, pathway)
ngenes = pathway_data.shape[1]
print("Random size " + str(ngenes))
random_data = pd.DataFrame(np.random.rand(pathway_data.shape[0], pathway_data.shape[1]))
if pathway_data.shape[1] > 1:
stability_check_random = check_stability(random_data, nsamples = 10, fraction = 0.2)
print(stability_check_random)
stability_check_random_df.loc[pathway] = [stability_check_random[0], stability_check_random[1], ngenes]
bins=np.histogram(np.hstack((stability_check_df['mean'],stability_check_random_df['mean'])), bins=50)[1]
plt.hist(stability_check_df['mean'], bins, density = True, alpha = 0.5)
plt.hist(stability_check_random_df['mean'], bins, density=True, alpha = 0.5)
plt.legend(['Sampled eigenpatients','Random'])
plt.xlabel('Mean pairwise cosine distance')
plt.ylabel('Density')
plt.savefig("plots/stability_vs_random.png", bbox_inches='tight')
stability_check_df['p'] = survival['p']
## Plot the results of the stability check
fig, ax = plt.subplots()
ax.plot(stability_check_df['mean'], stability_check_df['p'], '.', alpha = 0.5)
ax.set_yscale('log')
ax.set_ylabel('Cox p-value')
ax.set_xlabel('Mean pairwise cosine distance')
plt.savefig('plots/stability_p.png', bbox_inches='tight')
## In here we try a new kind of plot to show survival and gene correlation
# metadata = metadata_df
# reactome_df = illumina_reactome_df
# reactome_id = 'R-HSA-196757'
# pathway_data = return_pathway(data, reactome_df, reactome_id)
# vec, exp = return_PC1(pathway_data)
# sort_df = pd.DataFrame(vec, index=pathway_data.columns)
# sort_seq = sort_df.sort_values(0).index.tolist()
# x = preprocessing.StandardScaler().fit_transform(pathway_data)
# pathway_data.loc[:,:] = x
# genes = pathway_data.columns
# pathway_data['T'] = metadata.T['T']
# pathway_data['E'] = (metadata.T['last_follow_up_status'] != 'a')*1
# pathway_data = pathway_data.where(pathway_data['E'] == 1).dropna()
# pathway_data_quantiles = pathway_data.groupby(pd.cut(pathway_data['T'], pathway_data['T'].quantile(np.arange(0.1,1,0.1)))).mean()
# pathway_data_quantiles['id'] = ['(' + str(np.round(x.left/365, 1)) + ', ' + str(np.round(x.right/365, 1)) + ']' for x in pathway_data_quantiles.index]
# pathway_data_long = pd.wide_to_long(pathway_data_quantiles, stubnames='ILMN', sep = '_', i='id', j='probe')
# pathway_data_long = pathway_data_long.reset_index(level=1)
# pathway_data_long['T'] = [np.round(x.mid) for x in pathway_data_long.index]
# pathway_data_long['T'] = pathway_data_long.index
# order = [int(x[5:]) for x in sort_seq]
# name = reactome_df.where(reactome_df['reactome_id'] == reactome_id).dropna()['reactome_name'].iloc[0]
# chart = sns.pointplot(x = 'probe', y = 'ILMN', hue = 'T', data=pathway_data_long, order = order, palette='vlag', scale = 0.5, errwidth=0)
# plt.legend(bbox_to_anchor=(1, 1), title = 'Survival time (years)')
# plt.ylabel('Mean normalized expression')
# plt.xlabel('Gene')
# chart.set_xticklabels(chart.get_xticklabels(), rotation=90)
# plt.plot(sort_df.sort_values(0).values, color = 'y', linestyle = '--', linewidth = 2)
# plt.title(name)
# ax = plt.gca()
# labels = ['ILMN_' + x.get_text() for x in ax.get_xticklabels()]
# dictionary_pd = illumina2ensembl_dictionary('data/illumina2hugo.txt').set_index('probe')
# labels_gene = [np.unique(dictionary_pd.loc[x,'gene'])[0] for x in labels]
# chart.set_xticklabels(labels_gene)
# plt.tight_layout()
# plt.savefig('plots/survial_eigenpatient.png', bbox_inches='tight')
    #### We compare pathways against their most predictive gene
# genes_cox['q'] = qvalue.qvalues(genes_cox)
# for path in survival.index:
# print('Compare' + path)
# genes = return_pathway(genes_cox,illumina_reactome_df,path).T
# max_p = genes.sort_values('p').iloc[0]['p']
# max_q = genes.sort_values('q').iloc[0]['q']
# ngenes = genes.shape[0]
# survival.loc[path,'max_probe_p'] = max_p
# survival.loc[path,'max_probe_q'] = max_q
# survival.loc[path,'ngenes'] = ngenes
# survival['q'] = qvalue.qvalues(survival, pcolname='p')
# plot = sns.scatterplot(x = survival['p'],
# y = survival['max_probe_p'],
# alpha = 0.5)
# plot.set(xscale = 'log', yscale = 'log', xlabel = 'Pathway p value', ylabel = 'Minimum probe p value')
# plt.plot([1e-24,1], [1e-24,1], linewidth=1, color = 'r')
# plt.savefig('plots/pathway_gene_p.png')
# plot = sns.scatterplot(x = survival['q'],
# y = survival['max_probe_q'],
# alpha = 0.5)
# plot.set(xscale = 'log', yscale = 'log', xlabel = 'Pathway $q$ value', ylabel = 'Minimum probe $q$ value')
# plt.plot([1e-21,1], [1e-21,1], linewidth=1, color = 'r')
# plt.savefig('plots/pathway_gene_q.png', bbox_inches='tight')
### compare concordance indexes
survival_cross_pathways = pickle.load(open('results/metabric_path_cross.p', 'rb'))
survival_cross_genes = pickle.load(open('results/metabric_gene_cross.p', 'rb'))
ssgsea_cross = pickle.load(open('results/ssgsea_cross.p', 'rb'))
survival_cross_pathways['mean'] = np.mean(survival_cross_pathways.values, axis=1)
survival_cross_genes['mean'] = np.mean(survival_cross_genes.values, axis=1)
ssgsea_cross['mean'] = np.mean(ssgsea_cross.values, axis=1)
bins=np.histogram(np.hstack((survival_cross_genes['mean'],survival_cross_pathways['mean'],ssgsea_cross['mean'])), bins=50)[1]
# fig, (ax1,ax2) = plt.subplots(2,1, sharey=True, sharex=True, figsize=(7,10))
plt.hist(survival_cross_pathways['mean'], bins, density = True, alpha = 0.5, histtype='step', fill=True)
plt.hist(survival_cross_genes['mean'], bins, density=True, alpha = 0.5, histtype='step', fill=True)
plt.legend(['Eigengenes','Transcripts'])
plt.xlabel('Concordance Index')
plt.ylabel('Density')
plt.savefig('plots/concordance_A.png', bbox_inches='tight')
plt.hist(survival_cross_pathways['mean'], bins, density = True, alpha = 0.5, histtype='step', fill=True)
plt.hist(ssgsea_cross['mean'], bins, density=True, alpha = 0.5, histtype='step', fill=True)
plt.legend(['Eigengenes','ssGSEA'])
plt.xlabel('Concordance Index')
plt.ylabel('Density')
plt.savefig('plots/concordance_B.png', bbox_inches='tight')
### correlation plot
activities = pickle.load(open('results/metabric_path_activities.p', 'rb'))
activities = activities.iloc[:,:-2].T
reactome_id = 'R-HSA-196757'
metadata = metadata_df
reactome_df = illumina_reactome_df
pathway_data = return_pathway(data, reactome_df, reactome_id)
vec, exp = return_PC1(pathway_data)
sort_df = pd.DataFrame(vec, index=pathway_data.columns)
sort_seq = sort_df.sort_values(0).index.tolist()
genes = pathway_data.columns
pathway_data['T'] = metadata.T['T']
pathway_data['E'] = (metadata.T['last_follow_up_status'] != 'a')*1
pathway_data['Eigengene'] = activities[reactome_id]
pathway_data_dead=pathway_data.where(pathway_data['E'] == 1).dropna()
#x = preprocessing.PowerTransformer(method='box-cox').fit_transform(pathway_data)
x = preprocessing.StandardScaler().fit_transform(pathway_data_dead)
pathway_data_dead.loc[:,:] = x
pathway_data_dead.sort_values(inplace=True,by=['T'])
pathway_data_dead = pathway_data_dead[['Eigengene','T']+sort_seq]
dictionary_pd = illumina2ensembl_dictionary('data/illumina2hugo.txt'
).set_index('probe')
labels_gene = [np.unique(dictionary_pd.loc[x,'gene'])[0] for x in pathway_data_dead.columns if 'ILMN_' in x]
pathway_data_dead.columns = [ x for x in pathway_data_dead.columns if 'ILMN_' not in x ] + labels_gene
#pathway_data_dead
#pathway_data_dead_time = pathway_data_dead.copy()
pathway_data_dead.rename(columns={"T":"Survival Time"}, inplace=True)
corr = pathway_data_dead.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
sns.set_context("talk")
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
#sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
# square=True, linewidths=.5, cbar_kws={"shrink": .5})
sns.heatmap(corr, cmap=cmap, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
f.savefig("plots/R-HSA-196757-Corr.png", bbox_inches='tight')
#### Permutation test
# permutation_results = pickle.load(open('results/permutation_results.p', 'rb'))
# cox_results = pickle.load(open('results/metabric_path_survival.p', 'rb'))
# for pathway, row in permutation_results.iterrows():
# z = np.abs(row['base'])
# perms = np.abs(row['perms'])
# num_higher = sum(x >= z for x in perms)
# p = num_higher/len(perms)
# cox_results.loc[pathway,'p_perms'] = p
# cox_results = cox_results.dropna()
# plt.scatter(cox_results['p'],cox_results['p_perms'], alpha = 0.5)
# plt.xlabel('Cox derived p value')
# plt.ylabel('Permutation derived p value')
# plt.yscale('log')
# plt.xscale('log')
# lim = np.min([np.min(cox_results['p']), np.min(cox_results['p_perms'])])
# plt.plot([1,lim],[1,lim], color='r', alpha = 0.5)
    # plt.savefig('plots/permutation.png', bbox_inches='tight')
    #### We compare pathways against their most predictive gene
    compare_df = pd.DataFrame(index=survival_cross_pathways.index, columns=['path', 'probes_max', 'probes_mean', 'probes_median', 'ngenes'])
#!/usr/bin/env python
'''
This code makes the figures for the manuscript "
'''
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib as mpl
import os
import sys
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
from glob import glob
import itertools as it
import matplotlib.patches as mpatches
import scona.make_graphs as mg
# Read in some of the other NSPN_CODE functions too
#this_scripts_dir=os.path.dirname(os.path.abspath(__file__))
#sys.path.append(this_scripts_dir)
#from networkx_functions import *
#from regional_correlation_functions import *
#from NSPN_functions import *
def plot_rich_club(rc, rc_rand, ax=None, figure_name=None, x_max=200, y_max=1.2, color=sns.color_palette()[0], norm=False):
'''
Make a pretty plot of the rich club values per degree
along with the rich club values you'd expect by chance
from a random network with preserved degree distribution
rc and rc_rand are calculated by the rich_club function
that is saved within the networkx_functions.py file
'''
# Import what you need
import matplotlib.pylab as plt
import seaborn as sns
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 6))
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig=None
if not norm:
# Plot the real rich club data
sns.tsplot(rc, color=color, ax=ax)
# Plot the random rich club data with confidence intervals error bars
sns.tsplot(rc_rand.T, err_style='ci_bars', color='grey', ci=95, ax=ax)
# Fix the x and y axis limits
ax.set_xlim((0, x_max))
ax.set_ylim((0, y_max))
else:
# Divide the real rich club by the averge of the
# randomised rich club to get a normalised curve
rc_norm = rc / rc_rand.T
sns.tsplot(rc_norm, err_style='ci_bars', color=color, ax=ax, ci=95)
# Make sure there aren't too many bins!
plt.locator_params(nbins=5)
# Set the x and y axis labels
ax.set_xlabel("Degree")
if not norm:
ax.set_ylabel("Rich Club")
else:
ax.set_ylabel("Normalised Rich Club")
# Despine because we all agree it looks better that way
sns.despine()
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
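# Hedged usage sketch for plot_rich_club. The original rich_club() helper lives
# in networkx_functions.py (imported above, currently commented out), so this
# sketch substitutes networkx's built-in rich-club coefficient and a simple
# degree-preserving randomisation; it is illustrative, not the original pipeline.
def _example_plot_rich_club(G, n_rand=10):
    rc_dict = nx.rich_club_coefficient(G, normalized=False)
    ks = sorted(rc_dict.keys())
    rc = np.array([rc_dict[k] for k in ks])
    rc_rand = np.zeros((len(ks), n_rand))
    for i in range(n_rand):
        R = G.copy()
        nx.double_edge_swap(R, nswap=R.number_of_edges(),
                            max_tries=R.number_of_edges() * 10)
        rand_dict = nx.rich_club_coefficient(R, normalized=False)
        rc_rand[:, i] = [rand_dict.get(k, 0.0) for k in ks]
    return plot_rich_club(rc, rc_rand, x_max=max(ks), y_max=1.2)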
def plot_degree_dist(G, ER=True, ax=None, figure_name=None, x_max=200, y_max=0.1, color=sns.color_palette()[0]):
'''
Make a pretty plot of the degree distribution
along with the degree distibution of an Erdos Renyi random
graph that has the same number of nodes and edges
'''
# Import what you need
import matplotlib.pylab as plt
import seaborn as sns
# Calculate the degrees from the graph
degrees = np.array(list(dict(G.degree()).values()))
degrees = degrees.astype('float')
# Calculate the Erdos Renyi graph from the main graph
# it has to match the number of nodes and edges
nodes = len(G.nodes())
cost = G.number_of_edges() * 2.0 / (nodes*(nodes-1))
G_ER = nx.erdos_renyi_graph(nodes, cost)
# Now calculate the degrees for the ER graph
degrees_ER = np.array(list(dict(G_ER.degree()).values()))
degrees_ER = degrees_ER.astype('float')
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 6))
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig=None
# Plot the read degrees and the ER degrees
sns.distplot(degrees, ax=ax)
if ER:
sns.kdeplot(degrees_ER, ax=ax, color='grey')
# Fix the x and y axis limits
ax.set_xlim((0, x_max))
ax.set_ylim((0, y_max))
# Make sure there aren't too many bins!
plt.locator_params(nbins=4)
# Set the x and y axis labels
ax.set_xlabel("Degree")
ax.set_ylabel("Probability")
#ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Despine because we all agree it looks better that way
sns.despine()
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
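# Quick illustration of plot_degree_dist on a synthetic graph (for sanity
# checking only; real analyses pass the structural covariance networks):
def _example_plot_degree_dist():
    G = nx.erdos_renyi_graph(300, 0.1)
    return plot_degree_dist(G, ER=True, x_max=100, y_max=0.1)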
def plot_network_measures(measure_dict, ax=None, figure_name=None, y_max=2.5, y_min=-0.5, color=sns.color_palette()[0]):
'''
Create a plot of the network measures
along with their random counterparts
'''
import seaborn as sns
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from scipy import stats
# Set the seaborn context and whotnot
sns.set_style('white')
sns.set_context("poster", font_scale=2)
# Read the measures dictionary into an array
df = measure_dict
# And re-order the columns in the data frame so that
# the graph will look nice
df = df[['a', 'a_rand',
'M', 'M_rand',
'E', 'E_rand',
'C', 'C_rand',
'L', 'L_rand',
'sigma', 'sigma_rand']]
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 6))
else:
fig=None
# Add a bar plot for each measure
for i in range(round(len(df.columns)/2)):
# Show the actual measure with error bars
# (Note that the error will be 0 for all measures
# except the small world coefficient)
if df[df.columns[i*2]].std() > 0.0000001:
ci = stats.norm.ppf((1+0.95)/2, scale=np.std(df[df.columns[i*2]]))
else:
ci = 0
ax.bar(i-0.12,
df[df.columns[i*2]].mean(),
yerr=ci,
width=0.2,
align='center',
color=color,
ecolor=color,
edgecolor='black')
# Show the random networks with error bars
if df[df.columns[i*2+1]].std() > 0.0000001:
ci = stats.norm.ppf((1+0.95)/2, scale=np.std(df[df.columns[i*2+1]]))
else:
ci = 0
ax.bar(i+0.12,
df[df.columns[i*2+1]].mean(),
yerr=ci,
width=0.2,
align='center',
color='grey',
ecolor='grey',
edgecolor='black')
# Sort out the xtick labels
ax.set_xticks(range(round(len(df.columns)/2)))
ax.set_xticklabels(df.columns[::2])
# Put in a bar at y=0
ax.axhline(0, linewidth=0.5, color='black')
# Fix the y axis limits
ax.set_ylim((y_min, y_max))
# Make sure there aren't too many bins!
plt.locator_params(axis='y', nbins=5)
# Set the y axis label
ax.set_ylabel("Network measures")
# Despine because we all agree it looks better that way
sns.despine()
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
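# Sketch with dummy values for the columns plot_network_measures expects;
# in the real pipeline these come from the graph-analysis measure dictionary.
def _example_plot_network_measures():
    cols = ['a', 'a_rand', 'M', 'M_rand', 'E', 'E_rand',
            'C', 'C_rand', 'L', 'L_rand', 'sigma', 'sigma_rand']
    dummy = pd.DataFrame(np.random.rand(10, len(cols)), columns=cols)
    return plot_network_measures(dummy)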
def plot_sagittal_network(G,
G_edge,
sagittal_pos,
axial_pos,
integer_adjust=3,
fractional_adjust=2.5,
cmap_name='jet',
ax=None,
figure_name=None):
import matplotlib.pylab as plt
import numpy as np
import networkx as nx
import community
import seaborn as sns
# Save the colormap
cmap = plt.get_cmap(cmap_name)
# Binarize both of these graphs
for u,v,d in G.edges(data=True):
d['weight']=1
for u,v,d in G_edge.edges(data=True):
d['weight']=1
# Compute the best partition based on the threshold you've specified in cost
partition = community.best_partition(G)
# Create a sorted list of communitites (modules) according to their average
# Y coordinate (front to back)
module_list = sort_partition(partition, axial_pos)
# Display the number of modules
size = np.float(len(module_list))
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 6))
else:
fig=None
# Loop through all the nodes, sorted acording to their x value
# meaning that we're going to plot nodes on the LEFT side of the
# brain first so they appear behind the nodes on the RIGHT side of
# the brain
x_values = []
for node in G.nodes():
x_values.append(axial_pos[node][0])
node_list = [ node for (x_coord, node) in sorted(zip(x_values, G.nodes())) ]
# Start the node loop
for node in node_list:
# Look up which module the node is in
mod = partition[node]
# Get the correct color acording to the sorted partition list
color = cmap( module_list.index(mod) / np.float(size) )
# Now draw on the node
nx.draw_networkx_nodes(G, sagittal_pos,
[node],
node_size = integer_adjust + fractional_adjust * np.array(G.degree(node)),
node_color = color,
ax = ax)
# Add in all the edges
nx.draw_networkx_edges(G_edge, sagittal_pos, alpha=0.2, ax = ax)
# Change the x and y limits to make the images look a bit better
ax.set_xlim(-120, 80)
ax.set_ylim(-45, 75)
# Turn the axis labels off
ax.set_axis_off()
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
def pretty_scatter(x, y, x_label='x', y_label='y', x_max=None, x_min=None, y_max=None, y_min=None, figure_name=None, ax=None, figure=None, color='k', marker_colors=None, marker_shapes=None, marker_size=100, marker='o', despine_right=True, y0_line=True, x0_line=False):
'''
This function creates a scatter plot with a regression line
for the y variable against the degrees of graph G
'''
# Import what you need
import matplotlib.pylab as plt
import seaborn as sns
# Load the data into a data frame
df = pd.DataFrame({x_label : x,
y_label : y})
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10))
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
if figure is None:
fig = plt.gcf()
else:
fig = figure
# Create a marker colors list if not given
if not marker_colors:
marker_colors = [color] * len(df[x_label])
# Create a marker colors list if not given
if not marker_shapes:
marker_shapes = [ marker ] * len(df[x_label])
df['marker_shapes'] = marker_shapes
df.sort_values(by='marker_shapes', inplace=True)
# Create the linear regression plot
ax = sns.regplot(x_label, y_label,
df, ci=95,
ax=ax,
color=color,
scatter_kws={'marker' : 'none'})
# Add in each of the different points so they have
# the right color and shape
for _x, _y, _s, _c in zip(df[x_label], df[y_label], marker_shapes, marker_colors):
ax.scatter(_x, _y, marker=_s, c=_c, lw=0.25, s=marker_size)
# Fix the x and y axis limits
if np.isscalar(x_max) and np.isscalar(x_min):
ax.set_xlim((x_min, x_max))
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_ylim((y_min, y_max))
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=5)
ax.locator_params(axis='x', nbins=5)
# Put a line at y = 0
if y0_line:
ax.axhline(0, linewidth=1, color='black', linestyle='--')
if x0_line:
ax.axvline(0, linewidth=1, color='black', linestyle='--')
# Despine because we all agree it looks better that way
# If you pass the argument "despine_right" then you aren't
# going to remove the right hand axis - necessary if you're
# going to need two axes.
if despine_right:
sns.despine(ax=ax)
else:
sns.despine(ax=ax, right=False)
ax.yaxis.label.set_rotation(270)
ax.yaxis.labelpad = 25
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
def degree_r_values(graph_dict, y, covars_list=['ones'], measure='CT', group='all'):
r_array = np.ones([30])
p_array = np.ones([30])
cost_list = range(1,31)
for i, cost in enumerate(cost_list):
cost = np.float(cost)
covars = '_'.join(covars_list)
key = '{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, cost)
G = graph_dict[key]
        degrees = np.array(list(dict(G.degree()).values()))
(r_array[i], p_array[i]) = pearsonr(degrees, y)
return r_array, p_array
def create_violin_labels():
'''
A little function to create a labels list for the MT depth
violin plots
'''
# Create an empty list for the names
labels_list = []
# Create a list of all the depths you care about
depth_list = np.hstack([np.arange(100,-1,-10), np.arange(-40, -81, -40)])
# Loop through all the depths
for i in depth_list:
# Fill in the appropriate label
if i == 100:
labels_list += ["Pial"]
elif i == 0:
labels_list += ["GM/WM"]
elif i > 0:
labels_list += ['{:2.0f}%'.format(100.0 - i)]
else:
labels_list += ['{:2.1f}mm'.format(i/-100.0)]
return labels_list
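# For reference, the labels produced above run from the pial surface down into
# white matter:
#   ['Pial', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%',
#    'GM/WM', '0.4mm', '0.8mm']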
def create_violin_data(measure_dict, mpm='MT', measure='all_slope_age', cmap='RdBu_r', cmap_min=-7, cmap_max=7):
'''
A little function to create a the data frame list
for the MT depth violin plots
INPUTS:
measure_dict --- dictionary containing measure values
measure -------- one of 'mean'
'std'
'all_slope_age'
'all_slope_ct'
default = 'all_slope_age'
colormap ------- matplotlib colormap
default = 'RdBu_r'
'''
import matplotlib as mpl
# Create an empty data frame for the data
# and an empty list for the associated colors
# The shape of the data frame should be the
# same in the end, but its creation is different
# if we're giving an array of numbers or just
# one value per depth
# Multiple values per depth
if type(measure_dict['{}_projfrac+000_{}'.format(mpm, measure)]) == np.ndarray:
n_values = len(measure_dict['{}_projfrac+000_{}'.format(mpm, measure)])
df = pd.DataFrame({'index' : range(n_values)})
else:
n_values = len(np.array([measure_dict['{}_projfrac+000_{}'.format(mpm, measure)]]))
df = pd.DataFrame({'index' : range(n_values) })
color_list = []
color_dict = {}
# Set up the color mapping
cm = plt.get_cmap(cmap)
cNorm = mpl.colors.Normalize(vmin=cmap_min, vmax=cmap_max)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cm)
# Create a list of all the depths you care about
depth_list = np.hstack([np.arange(100,-1,-10), np.arange(-40, -81, -40)])
# Loop through all the depths
for i in depth_list:
# Fill in the appropriate data
if i >= 0:
m_array = measure_dict['{}_projfrac{:+04.0f}_{}'.format(mpm, i, measure)]
else:
m_array = measure_dict['{}_projdist{:+04.0f}_{}'.format(mpm, i, measure)]
df['{}'.format(i)] = m_array
color_list += [scalarMap.to_rgba(np.mean(df['{}'.format(i)]))]
color_dict['{}'.format(i)] = scalarMap.to_rgba(np.percentile(df['{}'.format(i)], 50))
return df, color_list, color_dict
def violin_mt_depths(measure_dict, mpm='MT', measure='all_slope_age', cmap='PRGn', cmap_min=-7, cmap_max=7, y_max=None, y_min=None, figure_name=None, ax=None, figure=None, y_label=None, vert=True, lam_labels=True, cbar=False, pad=30):
'''
INPUTS:
    measure_dict ----- dictionary of measure values per cortical depth
    measure ---------- which measure to plot (e.g. 'all_slope_age')
vert ------------- create vertical box plots (rather than horizontal)
'''
# Import what you need
import matplotlib.pylab as plt
import seaborn as sns
# Get the data, colors and labels
df, color_list, color_dict = create_violin_data(measure_dict,
mpm=mpm,
measure=measure,
cmap=cmap,
cmap_min=cmap_min,
cmap_max=cmap_max)
labels_list = create_violin_labels()
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10))
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig = figure
# Create the box plot if you have multiple measures per depth
##### You could change this here to a violin plot if you wanted to...
if df.shape[0] > 1:
ax = sns.boxplot(df[df.columns[1:]], palette=color_dict, ax=ax, vert=vert)
# Or make a simple line plot if you're showing one value
# per depth
else:
x = np.arange(len(df[df.columns[1:]].values[0]), 0, -1) - 1
y = df[df.columns[1:]].values[0]
if vert:
ax.plot(x, y, color=color_list[0])
ax.set_xlim(-0.5, 12.5)
ax.set_xticks(range(13))
else:
ax.plot(y, x, color=color_list[0])
ax.invert_yaxis()
ax.set_ylim(12.5, -0.5)
ax.set_yticks(range(13))
# Adjust a bunch of values to make the plot look lovely!
if vert:
# Fix the y axis limits
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_ylim((y_min, y_max))
# Set tick labels to be in scientific format if they're larger than 100
# or smaller than 0.001
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=4)
# Add in the tick labels and rotate them
ax.set_xticklabels(labels_list, rotation=90)
# Put a line at the grey white matter boundary
# and another at y=0
ax.axvline(10, linewidth=1, color='black', linestyle='--', zorder=-1)
ax.axhline(0, linewidth=1, color='black', linestyle='-', zorder=-1)
# Set the y label if it's been given
if y_label:
ax.set_ylabel(y_label)
else:
# Fix the x axis limits
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_xlim((y_min, y_max))
# Set tick labels to be in scientific format if they're larger than 100
# or smaller than 0.001
ax.ticklabel_format(axis='x', style='sci', scilimits=(-5,5))
size = ax.get_yticklabels()[0].get_fontsize()
for lab in ax.get_yticklabels():
f_size = lab.get_fontsize()
lab.set_fontsize(f_size * 0.85)
# Add in the tick labels
ax.set_yticklabels(labels_list)
# Make sure there aren't too many bins!
ax.locator_params(axis='x', nbins=4)
# Put a line at the grey white matter boundary
# and another at x=0
ax.axhline(10, linewidth=1, color='black', linestyle='--', zorder=-1)
ax.axvline(0, linewidth=1, color='black', linestyle='-', zorder=-1)
# Set the measure label (passed as y_label) on the x axis if it's been given
if y_label:
ax.set_xlabel(y_label)
# Despine because we all agree it looks better that way
sns.despine()
# Add in the laminae
ax = violin_add_laminae(ax, vert=vert, labels=lam_labels)
# Add a colorbar if necessary:
if cbar:
cb_grid = gridspec.GridSpec(1,1)
pos = ax.get_position()
if vert:
cb_grid.update(left=pos.x1+0.01, right=pos.x1+0.02, bottom=pos.y0, top=pos.y1, wspace=0, hspace=0)
else:
cb_grid.update(left=pos.x0, right=pos.x1, bottom=pos.y0-0.075, top=pos.y0-0.06, wspace=0, hspace=0)
fig = add_colorbar(cb_grid[0], fig,
cmap_name=cmap,
y_min = y_min,
y_max = y_max,
cbar_min=cmap_min,
cbar_max=cmap_max,
show_ticks=False,
vert=vert)
if not vert:
# If you add in a colorbar then you need to move the x axis label
# down just a smidge
ax.set_xlabel(y_label, labelpad=pad)
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
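# A minimal usage sketch for violin_mt_depths (the file name is hypothetical;
# measure_dict is assumed to hold the depth-wise keys described above):
#
#     violin_mt_depths(measure_dict,
#                      mpm='MT',
#                      measure='all_slope_age',
#                      cmap='PRGn',
#                      cmap_min=-0.007, cmap_max=0.007,
#                      y_min=-0.010, y_max=0.015,
#                      y_label='Slope MT with age',
#                      figure_name='violin_mt_depths.png')
#
# When figure_name is given the plot is saved and closed; otherwise the axis
# is returned so it can be embedded in a larger figure (as in figure_1 below).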
def violin_add_laminae(ax, vert=True, labels=True):
'''
Great big thank yous to <NAME> for journeying
to the actual library and reading an actual book to pull
out these values from von Economo's original work.
I took these values from Konrad, averaged across regions to
get an average thickness per region, added these together
to get an average total thickness and divided each value by
this total number to get the percentages.
I then scaled the percentages so they lay on top of a scale
from 0 - 10 corresponding to the 11 sample depths for the
freesurfer analyses (for example, a mean boundary at 4.6 maps to
roughly 4.6 * 10 / 11.0 = 4.2 on that scale).
The variance around each value was reasonably small.
Means:
0.9 1.6 4.6 5.7 7.6 11.0
Standard deviations:
0.17 0.21 0.25 0.12 0.10 0.12
Mean + 1 standard deviation:
1.6 2.2 5.0 6.0 7.8 10.9
Mean - 1 standard deviation:
2.0 2.6 5.5 6.3 8.0 11.1
'''
boundary_values = [0.0, 0.8, 1.4, 4.2, 5.1, 6.9, 10.0]
numerals = [ 'I', 'II', 'III', 'IV', 'V', 'VI', 'WM' ]
# Figure out where the bottom of the plot lies
# (this changes according to the number of samples into
# white matter that you've plotted)
if vert:
left = ax.get_xlim()[0]
right = ax.get_xlim()[1]
boundary_values[0] = left
boundary_values = boundary_values + [ right ]
else:
bottom = ax.get_ylim()[0]
top = ax.get_ylim()[1]
boundary_values[0] = top
boundary_values = boundary_values + [ bottom ]
# Put in the mean boundaries
for top, bottom in zip(boundary_values[1::2], boundary_values[2::2]):
if vert:
ax.axvspan(top, bottom, facecolor=(226/255.0, 226/255.0, 226/255.0), alpha=1.0, edgecolor='none', zorder=-1)
else:
ax.axhspan(top, bottom, facecolor=(226/255.0, 226/255.0, 226/255.0), alpha=1.0, edgecolor='none', zorder=-1)
if labels:
for lab in ax.get_yticklabels():
f_size = lab.get_fontsize()
for top, bottom, numeral in zip(boundary_values[0:-1], boundary_values[1:], numerals):
if vert:
x_pos = np.mean([top, bottom])
y_pos = ax.get_ylim()[1] - (ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.05
ax.text(x_pos, y_pos, numeral,
horizontalalignment='center',
verticalalignment='center',
fontsize=f_size)
else:
x_pos = ax.get_xlim()[1] - (ax.get_xlim()[1] - ax.get_xlim()[0]) * 0.05
y_pos = np.mean([top, bottom])
ax.text(x_pos, y_pos, numeral,
horizontalalignment='center',
verticalalignment='center',
fontsize=f_size)
return ax
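# violin_add_laminae is normally called from within violin_mt_depths (above),
# but it can be re-used on any axis whose depth axis follows the same 0-12
# sample positions, for example:
#
#     ax = violin_add_laminae(ax, vert=True, labels=True)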
def old_figure_1(graph_dict,
figures_dir,
sagittal_pos,
axial_pos,
measure_dict,
n=10,
measure='CT',    # anatomical measure the graphs were built from (assumed default)
covars_list=['ones'],
group='all'):
big_fig, ax_list = plt.subplots(6, 5, figsize=(40, 35), facecolor='white', sharey='row')
cost_list = [ 5, 10, 15, 20, 30 ]
for i, cost in enumerate(cost_list):
cost = np.float(cost)
covars = '_'.join(covars_list)
key = '{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, cost)
print(key)
G = graph_dict['{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, cost)]
G_edge = graph_dict['{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, 2)]
#==== SHOW THE SAGITTAL NETWORK VIEW ==================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_sagittalnetwork_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
plot_sagittal_network(G, G_edge, sagittal_pos, axial_pos,
integer_adjust=0.1, fractional_adjust=100.0/cost, cmap_name='jet',
figure_name=figure_name)
ax_list[0, i] = plot_sagittal_network(G, G_edge, sagittal_pos, axial_pos,
integer_adjust=0.1, fractional_adjust=100.0/cost, cmap_name='jet',
ax=ax_list[0, i])
#==== SET UP RANDOM GRAPH =====-=======================
# Start by creating n random graphs
R_list = []
for _ in range(n):
R_list += [ random_graph(G) ]
#============= DEGREE DISTRIBUTION ====================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_degreesKDE_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
plot_degree_dist(G, figure_name=figure_name, x_max=100, y_max=0.1, color=sns.color_palette()[0])
ax_list[1, i] = plot_degree_dist(G, ax=ax_list[1, i], x_max=200, y_max=0.1, color=sns.color_palette()[0])
#============= RICH CLUB ==============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_richclub_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
deg, rc, rc_rand = rich_club(G, R_list, n=n)
plot_rich_club(rc, rc_rand, figure_name=figure_name, x_max=100, y_max=1.2, color=sns.color_palette()[0])
ax_list[2, i] = plot_rich_club(rc, rc_rand, ax=ax_list[2, i], x_max=200, y_max=1.2, color=sns.color_palette()[0])
#============= NETWORK MEASURES =======================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_networkmeasures_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
network_measure_dict = calculate_network_measures(G, R_list, n=n)
plot_network_measures(network_measure_dict,
figure_name=figure_name,
y_max=2.5, y_min=-0.5,
color=sns.color_palette()[0])
ax_list[3, i] = plot_network_measures(network_measure_dict,
ax=ax_list[3, i],
y_max=2.5, y_min=-0.5,
color=sns.color_palette()[0])
#============= CORR DEGREE W/slope CT age =======================
ax_list[4, i] = pretty_scatter(dict(G.degree()).values(), measure_dict['CT_all_slope_age'],
x_label='Degree', y_label='Slope CT with age',
x_max=100, x_min=0,
y_max=0.05, y_min=-0.1,
color='k',
ax=ax_list[4, i],
figure=big_fig)
#============= CORR DEGREE W/slope MT age =======================
ax_list[5, i] = pretty_scatter(dict(G.degree()).values(), measure_dict['MT_projfrac+030_all_slope_age'],
x_label='Degree', y_label='Slope MT(70%) with age',
x_max=100, x_min=0,
y_max=0.020, y_min=-0.010,
color='k',
ax=ax_list[5, i],
figure=big_fig)
# Get rid of y axis labels for columns that aren't on the left side
[ a.set_ylabel('') for a in ax_list[:,1:].reshape(-1) ]
# RAAAANDOMLY - and I don't know why this is happening
# set the x limits for the very last plot to those of the one
# next to it - HMMMMMM
ax_list[5,i].set_xlim( ax_list[5,i-1].get_xlim() )
# Nice tight layout
big_fig.tight_layout()
big_fig.subplots_adjust(top=0.95)
for i, cost in enumerate(cost_list):
big_fig.text((2*i+1)/(len(cost_list)*2.0), 0.99,
'density: {:.0f}%'.format(np.float(cost)),
horizontalalignment='center',
verticalalignment='top',
fontsize=60,
weight='bold')
# Save the figure
filename = os.path.join(figures_dir,
'SuppFigure1_{}_covar_{}.png'.format(measure,
covars))
big_fig.savefig(filename, bbox_inches=0, dpi=100)
plt.close()
def old_figure_2(df_ct, df_mpm, measure_dict, figures_dir, results_dir, aparc_names, mpm='MT'):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
big_fig, ax_list = plt.subplots(3,3, figsize=(30, 18), facecolor='white')
#==== CORRELATE GLOBAL CT WITH AGE =============================
figure_name = os.path.join(figures_dir, 'Global_CT_corr_Age.png')
color=sns.color_palette('RdBu_r', 10)[1]
pretty_scatter(df_ct['age_scan'], df_ct['Global'],
x_label='Age (years)', y_label='Cortical Thickness\n(mm)',
x_max=25, x_min=14,
y_max=3.0, y_min=2.4,
figure_name=figure_name,
color=color)
ax_list[0, 0] = pretty_scatter(df_ct['age_scan'], df_ct['Global'],
x_label='Age (years)', y_label='Cortical Thickness\n(mm)',
x_max=25, x_min=14,
y_max=3.0, y_min=2.4,
color=color,
ax=ax_list[0, 0],
figure=big_fig)
#==== CORRELATE GLOBAL MT(70) WITH AGE =============================
figure_name = os.path.join(figures_dir,
'Global_{}_projfrac+030_corr_Age.png'.format(mpm))
color=sns.color_palette('PRGn_r', 10)[1]
pretty_scatter(df_mpm['age_scan'], df_mpm['Global'],
x_label='Age (years)', y_label='Magnetisation Transfer\nat 70% cortical depth',
x_max=25, x_min=14,
y_max=1.05, y_min=0.8,
figure_name=figure_name,
color=color)
ax_list[1, 0] = pretty_scatter(df_mpm['age_scan'], df_mpm['Global'],
x_label='Age (years)', y_label='Magnetisation Transfer\nat 70% cortical depth',
x_max=25, x_min=14,
y_max=1.05, y_min=0.8,
color=color,
ax=ax_list[1, 0],
figure=big_fig)
#==== CORRELATE GLOBAL MT(70) WITH CT =============================
figure_name = os.path.join(figures_dir,
'Global_{}_projfrac+030_corr_CT.png'.format(mpm))
color=sns.color_palette('PRGn', 10)[1]
pretty_scatter(df_ct['Global'], df_mpm['Global'],
x_label='Cortical Thickness (mm)', y_label='Magnetisation Transfer\nat 70% cortical depth',
x_max=3.0, x_min=2.4,
y_max=1.05, y_min=0.8,
figure_name=figure_name,
color=color)
ax_list[2, 0] = pretty_scatter(df_ct['Global'], df_mpm['Global'],
x_label='Cortical Thickness (mm)', y_label='Magnetisation Transfer\nat 70% cortical depth',
x_max=3.0, x_min=2.4,
y_max=1.05, y_min=0.8,
color=color,
ax=ax_list[2, 0],
figure=big_fig)
#==== SHOW PYSURFER CT CORR AGE =============================
#figure_name = os.path.join(results_dir,
# 'Global_MT_projfrac+030_corr_CT.png')
#img = mpimg.imread(f)
#ax_list[0,1].imshow(img)
# EASY - but needs fiddling with - TBD
#==== CORRELATE GLOBAL CT WITH DeltaCT =============================
figure_name = os.path.join(figures_dir,
'Mean_CT_corr_slope_CT_age.png')
color=sns.color_palette('RdBu_r', 10)[1]
pretty_scatter(measure_dict['CT_all_mean'], measure_dict['CT_all_slope_age'],
x_label='Cortical Thickness (mm)', y_label='Slope CT with age',
x_max=4.0, x_min=1.8,
y_max=0.04, y_min=-0.04,
figure_name=figure_name,
color=color)
ax_list[0, 2] = pretty_scatter(measure_dict['CT_all_mean'], measure_dict['CT_all_slope_age'],
x_label='Cortical Thickness (mm)', y_label='Slope CT with age\n',
x_max=4.0, x_min=1.8,
y_max=0.04, y_min=-0.04,
color=color,
ax=ax_list[0, 2],
figure=big_fig)
#==== SHOW CORR WITH AGE AT DIFFERENT DEPTHS ======================
figure_name = os.path.join(figures_dir,
'{}_projfrac+030_corr_Age_DifferentDepths.png'.format(mpm))
violin_mt_depths(measure_dict,
measure='all_slope_age',
cmap='PRGn',
y_max=0.015, y_min=-0.010,
cmap_min=-0.007, cmap_max=0.007,
figure_name=figure_name,
mpm=mpm,
vert=False)
ax_list[1, 2] = violin_mt_depths(measure_dict,
y_label='Slope MT(70%)\nwith age',
measure='all_slope_age',
y_max=0.015, y_min=-0.010,
cmap_min=-0.007, cmap_max=0.007,
ax=ax_list[1, 2],
figure=big_fig,
mpm=mpm)
#==== SHOW CORR WITH CT AT DIFFERENT DEPTHS ======================
figure_name = os.path.join(figures_dir,
'{}_projfrac+030_corr_CT_DifferentDepths.png'.format(mpm))
violin_mt_depths(measure_dict,
measure='all_slope_ct',
cmap='PRGn',
y_min=-7.0,
y_max=3.0,
cmap_min=-3.0,
cmap_max=3.0,
figure_name=figure_name,
mpm=mpm,
vert=False)
ax_list[2, 2] = violin_mt_depths(measure_dict,
y_label='Slope MT(70%)\nwith CT',
measure='all_slope_ct',
cmap='PRGn',
y_min=-7.0,
y_max=3.0,
cmap_min=-3.0,
cmap_max=3.0,
ax=ax_list[2, 2],
figure=big_fig,
mpm=mpm)
# Align the y labels for each column
for ax in ax_list.reshape(-1):
ax.yaxis.set_label_coords(-0.12, 0.5)
# Turn off the axes for the middle column
for ax in ax_list[:,1]:
ax.axis('off')
# Nice tight layout
big_fig.tight_layout()
# Save the figure
filename = os.path.join(figures_dir, 'Figure2.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
plt.close()
def old_figure_3(graph_dict, measure_dict, figures_dir, covars_list=['ones'], group='all', measure='CT'):
import matplotlib.pylab as plt
import numpy as np
import networkx as nx
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
big_fig, ax_list = plt.subplots(2,3, figsize=(30, 12), facecolor='white')
cost = 10
cost = np.float(cost)
covars = '_'.join(covars_list)
key = '{}_covar_{}_{}_COST_{:02.0f}'.format(measure, covars, group, cost)
G = graph_dict[key]
pc_dict = participation_coefficient(G)
pc = np.array(list(pc_dict.values()))
degrees = np.array(list(dict(G.degree()).values()))
#==== CORRELATE DEGREES WITH CHANGE IN CT WITH AGE =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrDegreesSlopeCTAge_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(degrees, measure_dict['CT_all_slope_age'],
x_label='Degree', y_label='Slope CT with age',
x_max=100, x_min=0,
y_max=0.05, y_min=-0.1,
figure_name=figure_name,
color='k')
ax_list[0, 0] = pretty_scatter(degrees, measure_dict['CT_all_slope_age'],
x_label='Degree', y_label='Slope CT with age',
x_max=100, x_min=0,
y_max=0.05, y_min=-0.1,
color='k',
ax=ax_list[0, 0],
figure=big_fig)
#==== CORRELATE PARTICIPATION COEFFS WITH CHANGE IN CT WITH AGE =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrPCSlopeCTAge_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(pc[pc>0], measure_dict['CT_all_slope_age'][pc>0],
x_label='Participation Coefficient', y_label='Slope CT with age',
x_max=1, x_min=0,
y_max=0.05, y_min=-0.1,
figure_name=figure_name,
color='k')
ax_list[1, 0] = pretty_scatter(pc[pc>0], measure_dict['CT_all_slope_age'][pc>0],
x_label='Participation Coefficient', y_label='Slope CT with age',
x_max=1, x_min=0,
y_max=0.05, y_min=-0.1,
color='k',
ax=ax_list[1, 0],
figure=big_fig)
#==== CORRELATE DEGREES WITH CHANGE IN MT30 WITH AGE =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrDegreesSlopeMT+030Age_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(degrees, measure_dict['MT_projfrac+030_all_slope_age'],
x_label='Degree', y_label='Slope MT(70%) with age',
x_max=100, x_min=0,
y_max=20, y_min=-10,
figure_name=figure_name,
color='k')
ax_list[0, 1] = pretty_scatter(degrees, measure_dict['MT_projfrac+030_all_slope_age'],
x_label='Degree', y_label='Slope MT(70%) with age',
x_max=100, x_min=0,
y_max=0.020, y_min=-0.010,
color='k',
ax=ax_list[0, 1],
figure=big_fig)
#==== CORRELATE PARTICIPATION COEFFS WITH CHANGE IN MT30 WITH AGE =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrPCSlopeMT+030Age_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(pc[pc>0], measure_dict['MT_projfrac+030_all_slope_age'][pc>0],
x_label='Participation Coefficient', y_label='Slope MT(70%) with age',
x_max=1, x_min=0,
y_max=20, y_min=-10,
figure_name=figure_name,
color='k')
ax_list[1, 1] = pretty_scatter(pc[pc>0], measure_dict['MT_projfrac+030_all_slope_age'][pc>0],
x_label='Participation Coefficient', y_label='Slope MT(70%) with age',
x_max=1, x_min=0,
y_max=20, y_min=-10,
color='k',
ax=ax_list[1, 1],
figure=big_fig)
#==== CORRELATE DEGREES WITH CHANGE IN MT30 WITH CT =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrDegreesSlopeMT+030CT_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(degrees, measure_dict['MT_projfrac+030_all_slope_ct'],
x_label='Degree', y_label='Slope MT(70%) with CT',
x_max=100, x_min=0,
y_max=0.005, y_min=-0.005,
figure_name=figure_name,
color='k')
ax_list[0, 2] = pretty_scatter(degrees, measure_dict['MT_projfrac+030_all_slope_ct'],
x_label='Degree', y_label='Slope MT(70%) with CT',
x_max=100, x_min=0,
y_max=0.005, y_min=-0.005,
color='k',
ax=ax_list[0, 2],
figure=big_fig)
#==== CORRELATE PARTICIPATION COEFFS WITH CHANGE IN MT30 WITH AGE =============================
figure_name = os.path.join(figures_dir,
'{}_covar_{}_{}_corrPCSlopeMT+030Age_COST_{:02.0f}.png'.format(measure,
covars,
group.upper(),
cost))
pretty_scatter(pc[pc>0], measure_dict['MT_projfrac+030_all_slope_ct'][pc>0],
x_label='Participation Coefficient', y_label='Slope MT(70%) with ct',
x_max=1, x_min=0,
y_max=0.005, y_min=-0.005,
figure_name=figure_name,
color='k')
ax_list[1, 2] = pretty_scatter(pc[pc>0], measure_dict['MT_projfrac+030_all_slope_ct'][pc>0],
x_label='Participation Coefficient', y_label='Slope MT(70%) with CT',
x_max=1, x_min=0,
y_max=0.005, y_min=-0.005,
color='k',
ax=ax_list[1, 2],
figure=big_fig)
# RAAAANDOMLY - and I don't know why this is happening
# set the x limits for the very last plot to those of the one
# next to it - HMMMMMM
#ax_list[3,i].set_xlim( ax_list[3,i-1].get_xlim() )
# Nice tight layout
big_fig.tight_layout()
# Save the figure
filename = os.path.join(figures_dir,
'Figure3_{}_covar_{}_{}_COST_{:02.0f}.png'.format(measure,
covars,
group,
cost))
big_fig.savefig(filename, bbox_inches=0, dpi=100)
plt.close()
def partial_volume_fig(measure_dict, figures_dir):
big_fig, ax_list = plt.subplots(2, 4, figsize=(40, 20), facecolor='white')
#==== SHOW MEAN MT AT DIFFERENT DEPTHS ======================
ax_list[0, 0] = violin_mt_depths(measure_dict,
mpm='MT',
measure='global_mean',
y_min=0,
y_max=2.0,
cmap='jet',
cmap_min=0,
cmap_max=2.0,
ax=ax_list[0, 0],
figure=big_fig)
ax_list[1, 0] = violin_mt_depths(measure_dict,
mpm='synthetic',
measure='global_mean',
y_min=0,
y_max=2.0,
cmap='jet',
cmap_min=0,
cmap_max=2.0,
ax=ax_list[1, 0],
figure=big_fig)
#==== SHOW STD AT DIFFERENT DEPTHS ======================
ax_list[0, 1] = violin_mt_depths(measure_dict,
mpm='MT',
measure='global_std',
y_min=0,
y_max=0.6,
cmap='jet',
cmap_min=0.0,
cmap_max=0.6,
ax=ax_list[0, 1],
figure=big_fig)
ax_list[1, 1] = violin_mt_depths(measure_dict,
mpm='synthetic',
measure='global_std',
y_min=0,
y_max=0.6,
cmap='jet',
cmap_min=0,
cmap_max=0.6,
ax=ax_list[1, 1],
figure=big_fig)
#==== SHOW CORR W AGE AT DIFFERENT DEPTHS ======================
ax_list[0, 2] = violin_mt_depths(measure_dict,
mpm='MT',
measure='all_slope_age',
y_min=-10,
y_max=15,
cmap='PRGn',
cmap_min=-15,
cmap_max=15,
ax=ax_list[0, 2],
figure=big_fig)
ax_list[1, 2] = violin_mt_depths(measure_dict,
mpm='synthetic',
measure='all_slope_age',
y_min=-10,
y_max=15,
cmap='PRGn',
cmap_min=-15,
cmap_max=15,
ax=ax_list[1, 2],
figure=big_fig)
#==== SHOW CORR W CT AT DIFFERENT DEPTHS ======================
ax_list[0, 3] = violin_mt_depths(measure_dict,
mpm='MT',
measure='all_slope_ct',
y_min=-0.01,
y_max=0.005,
cmap='PRGn',
cmap_min=-0.01,
cmap_max=0.01,
ax=ax_list[0, 3],
figure=big_fig)
ax_list[1, 3] = violin_mt_depths(measure_dict,
mpm='synthetic',
measure='all_slope_ct',
y_min=-0.01,
y_max=0.005,
cmap='PRGn',
cmap_min=-0.01,
cmap_max=0.01,
ax=ax_list[1, 3],
figure=big_fig)
# Nice tight layout
big_fig.tight_layout()
# Save the figure
filename = os.path.join(figures_dir, 'PartialVolumeFig_AcrossParticipants.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
plt.close()
# MEAN MAGNETISATION TRANSFER ACROSS ALL PARTICIPANTS
def all_mean_mt(measure_dict, figures_dir, mpm='MT'):
figure_name = os.path.join(figures_dir,
'{}_all_mean_DifferentDepths.png'.format(mpm))
fig, ax = plt.subplots(figsize=(10, 8), facecolor='white')
ax = violin_mt_depths(measure_dict,
measure='all_mean',
y_label='Magnetisation Transfer',
y_min=0.0,
y_max=2.0,
cmap='jet',
cmap_min=0.2,
cmap_max=1.8,
figure=fig,
ax=ax,
mpm=mpm)
# Nice tight layout
fig.tight_layout()
fig.subplots_adjust(right=0.9)
import matplotlib as mpl    # local import, matching the other functions in this module
cmap = mpl.cm.jet
norm = mpl.colors.Normalize(vmin=0.2, vmax=1.8)
cax = fig.add_axes([0.93, 0.3, 0.02, 0.6])
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
norm=norm,
orientation='vertical',
ticks=np.arange(0.2, 1.81, 0.8))
cax.tick_params(labelsize=20)
# Save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close()
def nodal_ct_mt(measure_dict, figures_dir, mpm='MT'):
figure_name = os.path.join(figures_dir,
'Nodal_CT_corr_{}_segCort.png'.format(mpm))
fig, ax = plt.subplots(figsize=(10, 8), facecolor='white')
ax = pretty_scatter(measure_dict['CT_all_mean'], measure_dict['{}all_all_mean'.format(mpm)],
x_label='Average Cortical Thickness (mm)', y_label='Average Magnetisation Transfer',
x_max=3.8, x_min=1.9,
y_max=1.00, y_min=0.750,
color='k',
ax=ax,
figure=fig)
# Save and close the figure (assumed: same convention as the other
# figure functions in this module)
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
def get_von_economo_color_dict(von_economo):
'''
Create a color dictionary for the von economo values you pass
The color_list is hard coded at the moment... might change one day
'''
color_list = [ 'purple', 'blue', 'green', 'orange', 'yellow', 'cyan' ]
#color_list = [ '0.5', '0.6', '0.7', '0.8', '0.9' ]
# You need to make it into a color dictionary
color_dict={}
for i, color in enumerate(color_list):
color_dict[i+1] = color
return color_dict
def get_von_economo_shapes_dict(von_economo):
'''
Create a dictionary containing a different marker shape for
each of the von economo values you pass
The shape_list is hard coded at the moment... might change one day
'''
shape_list = [ 'o', '^', 's', 'v', 'd' ]
# You need to make it into a shape dictionary
shape_dict={}
for i, shape in enumerate(shape_list):
shape_dict[i+1] = shape
return shape_dict
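# A quick sketch of how these two helpers are used together (von_economo here
# is a hypothetical list of laminar type labels, one per region):
#
#     von_economo = [1, 2, 2, 3, 5, 4, 1]
#     color_dict = get_von_economo_color_dict(von_economo)   # {1: 'purple', ...}
#     shape_dict = get_von_economo_shapes_dict(von_economo)  # {1: 'o', ...}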
def von_economo_boxes(measure_dict, figures_dir, von_economo, measure='CT_all_mean', group_label='Cortical Laminar Pattern', y_label=None, y_min=1.5, y_max=4.0, figure_name=None, figure=None, ax=None, von_economo_colors=True, color_dict="muted", cmap_name=None, max_color=False, min_color=False, alpha=1.0):
# Read the data into a data frame
df = pd.DataFrame( { 'x' : measure_dict[measure],
group_label : von_economo } )
# If you've turned on the von_economo_colors flag
# then you'll always used the set color scheme
if von_economo_colors:
color_dict = get_von_economo_color_dict(von_economo)
else:
color_dict = color_dict
# If you've passed a colormap then you're going to make a
# color dict from that colormap
if cmap_name:
cmap = plt.get_cmap(cmap_name)
color_dict = {}
n = len(set(von_economo))
for i, value in enumerate(set(von_economo)):
color_dict[value] = cmap(np.float(i + 0.5)/n)
# Order the box plots from max to min
order = range(np.floor(np.min(von_economo)).astype('int'),
np.floor(np.max(von_economo)).astype('int')+1)
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 8), facecolor='white')
# Set the seaborn style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig = figure
# Make the box plot
bp = sns.boxplot(df.x[df.x>-99],
groupby=df[group_label],
order=order,
palette=color_dict,
ax=ax)
# Set the y label if it's been given
if y_label:
ax.set_ylabel(y_label)
# Set the y limits
ax.set_ylim((y_min, y_max))
# Make the max median line red if requested
if max_color:
medians = [ line.get_ydata()[0] for line in bp.get_lines()[4::6] ]
max_median = np.max(medians)
for line in bp.get_lines()[4::6]:
if line.get_ydata()[0] == max_median:
line.set_color(max_color)
# Make the minimum median line red if requested
if min_color:
medians = [ line.get_ydata()[0] for line in bp.get_lines()[4::6] ]
min_median = np.min(medians)
for line in bp.get_lines()[4::6]:
if line.get_ydata()[0] == min_median:
line.set_color(min_color)
# Change the alpha value for the fill color if requested
start_i = len(set(von_economo))*6 + 2
stop_i = len(set(von_economo))*7 + 2
for patch in bp.get_default_bbox_extra_artists()[start_i:stop_i]:
fc = patch.get_facecolor()
patch.set_facecolor((fc[0], fc[1], fc[2], alpha))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=4)
# Put a line at y = 0
ax.axhline(0, linewidth=1, color='black', linestyle='--')
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
def von_economo_scatter(measure_dict, figures_dir, von_economo, measure='CT_all_mean', x_label='x', y_label='y', x_min=1.5, x_max=4.0, y_min=0.8, y_max=1.2, figure_name=None, figure=None, ax=None):
# Set the seaborn style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
# Read the data into a data frame
df = pd.DataFrame( { x_label : measure_dict[x_label],
y_label : measure_dict[y_label],
'Cortical Laminar Pattern' : von_economo } )
# You'll always use this color_list
color_list = [ 'purple', 'blue', 'green', 'orange', 'yellow' ]
# You need to make it into a color dictionary
color_dict={}
for i, color in enumerate(color_list):
color_dict[i+1] = color
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10), facecolor='white')
else:
fig = figure
for i in range(1,6):
df_i = df[df['Cortical Laminar Pattern']==i]
# Create the linear regression plot
ax = sns.regplot(x_label, y_label, df_i, ci=95, ax=ax, color=color_dict[i], scatter_kws={'s': 60})
# Fix the x and y axis limits
if np.isscalar(x_max) and np.isscalar(x_min):
ax.set_xlim((x_min, x_max))
if np.isscalar(y_max) and np.isscalar(y_min):
ax.set_ylim((y_min, y_max))
ax.ticklabel_format(axis='y', style='sci', scilimits=(-3,3))
# Make sure there aren't too many bins!
ax.locator_params(axis='y', nbins=4)
# Put a line at y = 0
ax.axhline(0, linewidth=1, color='black', linestyle='--')
# Despine because we all agree it looks better that way
sns.despine()
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax
def add_four_hor_brains(grid, f_list, big_fig, hor=True):
'''
Take the four pysurfer views (left lateral, left medial,
right medial and right lateral) and arrange them in a row
according to the grid positions given by grid
grid : the gridspec list of grid placements
f_list : list of four file pysurfer image files
big_fig : the figure to which you're adding the images
# THIS WAS UPDATED TO INCLUDE PLOTTING IN A GRID
# Should probably change the function name!
'''
for g_loc, f in zip(grid, f_list):
img = mpimg.imread(f)
# Crop the figures appropriately
# NOTE: this can change depending on which system you've made the
# images on originally - it's a bug that needs to be sorted out!
if 'lateral' in f:
img_cropped = img[115:564, 105:(-100),:]
else:
if hor:
img_cropped = img[90:560, 60:(-55),:]
else:
img_cropped = img[70:580, 40:(-35),:]
# Add an axis to the big_fig
ax_brain = plt.Subplot(big_fig, g_loc)
big_fig.add_subplot(ax_brain)
# Show the brain on this axis
ax_brain.imshow(img_cropped, interpolation='none')
ax_brain.set_axis_off()
return big_fig
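# A minimal sketch of add_four_hor_brains (file names are hypothetical; the
# grid must provide four gridspec locations, one per view):
#
#     grid = gridspec.GridSpec(1, 4)
#     grid.update(left=0.01, right=0.69, bottom=0.81, top=1.01, wspace=0, hspace=0)
#     f_list = [ 'lh_pial_classic_lateral.png',
#                'lh_pial_classic_medial.png',
#                'rh_pial_classic_medial.png',
#                'rh_pial_classic_lateral.png' ]
#     big_fig = add_four_hor_brains(grid, f_list, big_fig)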
def add_colorbar(grid, big_fig, cmap_name, y_min=0, y_max=1, cbar_min=0, cbar_max=1, vert=False, label=None, show_ticks=True, pad=0):
'''
Add a colorbar to the big_fig in the location defined by grid
grid : grid spec location to add colormap
big_fig : figure to which colorbar will be added
cmap_name : name of the colormap
y_min : the minimum value to plot this colorbar between
y_max : the maximum value to plot this colorbar between
cbar_min : minimum value for the colormap (default 0)
cbar_max : maximum value for the colormap (default 1)
vert : whether the colorbar should be vertical (default False)
label : the label for the colorbar (default: None)
show_ticks : whether to put the tick values on the colorbar (default: True)
pad : how much to shift the colorbar label by (default: 0)
'''
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
# Add an axis to the big_fig
ax_cbar = plt.Subplot(big_fig, grid)
big_fig.add_subplot(ax_cbar)
# Normalise the colorbar so you have the correct upper and
# lower limits and define the three ticks you want to show
norm = mpl.colors.Normalize(vmin=cbar_min, vmax=cbar_max)
if show_ticks:
ticks = [y_min, np.average([y_min, y_max]), y_max]
else:
ticks=[]
# Figure out the orientation
if vert:
orientation='vertical'
rotation=270
else:
orientation='horizontal'
rotation=0
# Add in your colorbar:
cb = mpl.colorbar.ColorbarBase(ax_cbar,
cmap=cmap_name,
norm=norm,
orientation=orientation,
ticks=ticks,
boundaries=np.linspace(y_min, y_max, 300))
if label:
cb.set_label(label, rotation=rotation, labelpad=pad)
return big_fig
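# A minimal sketch of add_colorbar (all values here are illustrative only):
#
#     cb_grid = gridspec.GridSpec(1, 1)
#     cb_grid.update(left=0.16, right=0.52, bottom=0.81, top=0.82)
#     big_fig = add_colorbar(cb_grid[0], big_fig,
#                            cmap_name='RdBu_r',
#                            cbar_min=-0.03, cbar_max=0.03,
#                            y_min=-0.03, y_max=0.03,
#                            label='Slope CT with age')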
def add_cells_picture(data_dir, big_fig, grid):
# Get the file name and read it in as an image
f_name = os.path.join(data_dir, 'CorticalLayers_schematic_cells.jpg')
img = mpimg.imread(f_name)
img_cropped = img[30:, :]
# Add an axis in the bottom left corner
ax = plt.Subplot(big_fig, grid[0])
big_fig.add_subplot(ax)
# Show the picture and turn the axis off
ax.imshow(img_cropped)
ax.axis('off')
# Get the font size
for lab in [ ax.yaxis.label ]:
f_size = lab.get_fontsize()
# Add in the laminar labels
boundary_values = [ 0, 113, 166, 419, 499, 653, 945, 1170 ]
numerals = [ 'I', 'II', 'III', 'IV', 'V', 'VI', 'WM' ]
for top, bottom, numeral in zip(boundary_values[0:], boundary_values[1:], numerals):
x_pos = -0.15 * img_cropped.shape[1]
y_pos = np.mean([top, bottom])
ax.text(x_pos, y_pos, numeral,
horizontalalignment='center',
verticalalignment='center',
fontsize=f_size/2.0)
return big_fig
def figure_1(measure_dict, figures_dir, results_dir, data_dir, mpm='MT', covars_name='none'):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=3)
# Define the sub_dict
sub_dict = measure_dict['308']['COVARS_{}'.format(covars_name)]
# Get the various min and max values:
min_max_dict = get_min_max_values(sub_dict)
axis_label_dict = get_axis_label_dict()
# Create the big figure
big_fig, big_ax = plt.subplots(figsize=(46, 13), facecolor='white')
big_ax.axis('off')
#=========================================================================
# Schematic for how we measured the different layers
grid = gridspec.GridSpec(1, 1)
grid.update(left=0.01, bottom=0.01, top=0.99, right=0.34, wspace=0, hspace=0)
ax = plt.Subplot(big_fig, grid[0])
big_fig.add_subplot(ax)
f_name = os.path.join(data_dir, 'CorticalLayers_schematic_methods.jpg')
img = mpimg.imread(f_name)
ax.imshow(img)
ax.axis('off')
#=========================================================================
# We're going to set up two separate grids for the violin plots so we can
# adjust the spacings independently without screwing up the others!
violin_ax_list = []
# First a space for the first violin plot on the far left
grid = gridspec.GridSpec(1, 1)
grid.update(left=0.39, right=0.64, top=0.97, bottom=0.16, wspace=0, hspace=0)
for g_loc in grid:
violin_ax_list += [ plt.Subplot(big_fig, g_loc) ]
big_fig.add_subplot(violin_ax_list[-1])
# Next a space for the corr with age
grid = gridspec.GridSpec(1, 1)
grid.update(left=0.74, right=0.99, top=0.97, bottom=0.16, wspace=0, hspace=0)
for g_loc in grid:
violin_ax_list += [ plt.Subplot(big_fig, g_loc) ]
big_fig.add_subplot(violin_ax_list[-1])
#=========================================================================
# Schematic for the different cytoarchitectonics for each layer
grid = gridspec.GridSpec(1, 1)
grid.update(left=0.64, right=0.74, top=0.97, bottom=0.155, wspace=0, hspace=0)
big_fig = add_cells_picture(data_dir, big_fig, grid)
#=========================================================================
# MT at 14 (BASELINE MT) ACROSS NODES at different depths
violin_ax_list[0] = violin_mt_depths(sub_dict,
measure='regional_corr_age_c14',
y_label=axis_label_dict['{}_regional_corr_age_c14'.format(mpm)],
cmap='jet',
y_min=min_max_dict['{}_regional_corr_age_c14_min'.format(mpm)],
y_max=min_max_dict['{}_regional_corr_age_c14_max'.format(mpm)],
cmap_min=min_max_dict['{}_regional_corr_age_c14_CBAR_min'.format(mpm)],
cmap_max=min_max_dict['{}_regional_corr_age_c14_CBAR_max'.format(mpm)],
lam_labels=False,
ax=violin_ax_list[0],
figure=big_fig,
mpm=mpm,
vert=False,
cbar=True)
# CORR WITH AGE ACROSS NODES at different depths
violin_ax_list[1] = violin_mt_depths(sub_dict,
measure='regional_corr_age_m',
y_label=axis_label_dict['{}_regional_corr_age_m'.format(mpm)],
cmap='RdBu_r',
y_min=min_max_dict['{}_regional_corr_age_m_min'.format(mpm)],
y_max=min_max_dict['{}_regional_corr_age_m_max'.format(mpm)],
cmap_min=min_max_dict['{}_regional_corr_age_m_max'.format(mpm)]*-1/2.0,
cmap_max=min_max_dict['{}_regional_corr_age_m_max'.format(mpm)]/2.0,
ax=violin_ax_list[1],
figure=big_fig,
lam_labels=False,
mpm=mpm,
vert=False,
cbar=True)
# Also remove the y tick labels for the violin plots
# that are not the first
for ax in violin_ax_list[1:]:
ax.set_yticklabels([])
#====== PANEL LABELS ==================================
big_ax = big_fig.add_subplot(111)
pos = big_ax.get_position()
pos.x0 = 0
pos.x1 = 1
pos.y0 = 0
pos.y1 = 1
big_ax.set_position(pos)
# Turn off the big axis
# You'll use it though to show
# the panel labels
big_ax.axis('off')
big_ax.text(0.015,
0.9,
'a',
horizontalalignment='left',
verticalalignment='bottom',
fontsize=50,
transform=big_ax.transAxes,
weight='bold',
color='w')
big_ax.text(0.61,
0.9,
'b',
horizontalalignment='left',
verticalalignment='bottom',
fontsize=50,
transform=big_ax.transAxes,
weight='bold')
big_ax.text(0.715,
0.9,
' c ',
horizontalalignment='left',
verticalalignment='bottom',
fontsize=50,
transform=big_ax.transAxes,
weight='bold',
bbox=dict(facecolor='white', edgecolor='white', alpha=0.8))
big_ax.text(0.97,
0.9,
'd',
horizontalalignment='left',
verticalalignment='bottom',
fontsize=50,
transform=big_ax.transAxes,
weight='bold')
# Save the figure
output_dir = os.path.join(figures_dir, 'COVARS_{}'.format(covars_name))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = os.path.join(output_dir, 'Figure1.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
rescale(filename, suff='jpg')
plt.close()
def figure_2(measure_dict, figures_dir, results_dir, mpm='MT', covars_name='none'):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=3)
# Define the sub_dict & global stats dict
sub_dict = measure_dict['308']['COVARS_{}'.format(covars_name)]
sub_dict['age_scan'] = measure_dict['308']['age_scan']
global_dict = measure_dict['Global']['COVARS_{}'.format(covars_name)]
sub_dict['CT_global_mean'] = global_dict['CT_global_mean']
sub_dict['MT_projfrac+030_global_mean'] = global_dict['MT_projfrac+030_global_mean']
# Get the various min and max values :
min_max_dict = get_min_max_values(sub_dict)
axis_label_dict = get_axis_label_dict()
# Create the big figure
big_fig = plt.figure(figsize=(34.5, 28), facecolor='white')
#==== FOUR ROWS OF DATA ======================================
# Make a list of the file names for the left lateral image
left_lat_fname_list = [ os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'CT_regional_corr_age_c14_lh_pial_classic_lateral.png'),
os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'MT_projfrac+030_regional_corr_age_c14_lh_pial_classic_lateral.png'),
os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'CT_regional_corr_age_m_masked_p_fdr_lh_pial_classic_lateral.png'),
os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'MT_projfrac+030_regional_corr_age_m_masked_p_fdr_lh_pial_classic_lateral.png') ]
# List the var names that will be used to get the axis labels
# and min/max values
var_name_list = [ ( 'CT_regional_corr_age_c14', 'age_scan', 'CT_global_mean' ),
( 'MT_projfrac+030_regional_corr_age_c14', 'age_scan', 'MT_projfrac+030_global_mean' ),
( 'CT_regional_corr_age_m', 'CT_regional_corr_age_c14', 'MT_projfrac+030_regional_corr_age_c14' ),
( 'MT_projfrac+030_regional_corr_age_m', 'CT_regional_corr_age_m', 'MT_projfrac+030_regional_corr_age_m' ) ]
# List the colorbar names
cmap_name_list = [ 'jet', 'jet', 'winter_r', 'autumn' ]
# Scatter grid
grid = gridspec.GridSpec(4, 1)
grid.update(left=0.75, bottom=0.06, top=0.97, right=0.99, hspace=0.5)
ax_list = []
for g_loc in grid:
ax_list += [ plt.Subplot(big_fig, g_loc) ]
big_fig.add_subplot(ax_list[-1])
for i, (left_lat_fname,
var_name,
cmap_name) in enumerate(zip(left_lat_fname_list,
var_name_list,
cmap_name_list)):
#==== BRAIN IMAGES ======================================
# Plot the braaaaains
f_list = [ left_lat_fname,
left_lat_fname.replace('lh_pial_classic_lateral', 'lh_pial_classic_medial'),
left_lat_fname.replace('lh_pial_classic_lateral', 'rh_pial_classic_medial'),
left_lat_fname.replace('lh_pial_classic_lateral', 'rh_pial_classic_lateral') ]
grid = gridspec.GridSpec(1,4)
grid.update(left=0.01,
right=0.69,
bottom=0.81 - (i*0.25),
top=1.01 - (i*0.25),
wspace=0,
hspace=0)
# Put the four brains in a row
big_fig = add_four_hor_brains(grid, f_list, big_fig)
# Add a colorbar
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left=0.16,
right=0.52,
bottom=0.81 - (i*0.25),
top=0.82 - (i*0.25),
wspace=0,
hspace=0)
big_fig = add_colorbar(cb_grid[0], big_fig,
cmap_name=cmap_name,
cbar_min=min_max_dict['{}_CBAR_min'.format(var_name[0])],
cbar_max=min_max_dict['{}_CBAR_max'.format(var_name[0])],
y_min=min_max_dict['{}_CBAR_min'.format(var_name[0])],
y_max=min_max_dict['{}_CBAR_max'.format(var_name[0])],
label=axis_label_dict[var_name[0]])
#==== SCATTER PLOTS =============================
x_name = var_name[1]
y_name = var_name[2]
if 'global' in y_name:
if y_name == 'CT_global_mean':
cmap_name = 'winter_r'
else:
cmap_name = 'autumn'
x_data = sub_dict[x_name]
y_data = sub_dict[y_name]
color_measure = y_name.replace('global_mean', 'regional_corr_age_m')
norm = mpl.colors.Normalize(vmin=min_max_dict['{}_CBAR_min'.format(color_measure)],
vmax=min_max_dict['{}_CBAR_max'.format(color_measure)])
cmap_converter = mpl.cm.ScalarMappable(norm=norm, cmap=cmap_name)
slope_name = '{}_corr_age_m'.format(y_name)
color = cmap_converter.to_rgba(global_dict[slope_name])
else:
color='k'
x_data = sub_dict[x_name]
y_data = sub_dict[y_name]
ax_list[i] = pretty_scatter(x_data, y_data,
x_label=axis_label_dict[x_name],
y_label=axis_label_dict[y_name],
x_min=min_max_dict['{}_min'.format(x_name)],
x_max=min_max_dict['{}_max'.format(x_name)],
y_min=min_max_dict['{}_min'.format(y_name)],
y_max=min_max_dict['{}_max'.format(y_name)],
color=color,
ax=ax_list[i],
figure=big_fig)
# Make sure axis is in scientific format
ax_list[i].ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
# Align the y labels for each column
ax_list[i].yaxis.set_label_coords(-0.14, 0.5)
# Update the font size for the labels
# to be a little smaller
for lab in [ ax_list[i].yaxis.label, ax_list[i].xaxis.label ]:
f_size = lab.get_fontsize()
lab.set_fontsize(f_size * 0.9)
#====== PANEL LABELS ==================================
big_ax = big_fig.add_subplot(111)
pos = big_ax.get_position()
pos.x0 = 0
pos.x1 = 1
pos.y0 = 0
pos.y1 = 1
big_ax.set_position(pos)
# Turn off the big axis
# You'll use it though to show
# the panel labels
big_ax.axis('off')
for i, letter in enumerate([ 'a', 'c', 'e', 'g' ]):
big_ax.text(0.01,
0.96 - (0.25*i),
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=60,
transform=big_ax.transAxes,
weight='bold')
for i, letter in enumerate([ 'b', 'd', 'f', 'h' ]):
big_ax.text(0.97,
0.96 - (0.25*i),
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=60,
transform=big_ax.transAxes,
weight='bold')
# Save the figure
output_dir = os.path.join(figures_dir, 'COVARS_{}'.format(covars_name))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = os.path.join(output_dir, 'Figure2.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
rescale(filename, suff='jpg')
plt.close()
def figure_3(measure_dict, figures_dir, results_dir, data_dir, mpm='MT', covars_name='none', enrich=True):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
# Define the sub_dict
sub_dict = measure_dict['308']['COVARS_{}'.format(covars_name)]
# Get the various min and max values:
min_max_dict = get_min_max_values(sub_dict)
axis_label_dict = get_axis_label_dict()
# Create the big figure
if enrich:
big_fig = plt.figure(figsize=(23, 25), facecolor='white')
else:
big_fig = plt.figure(figsize=(23, 12), facecolor='white')
# Set up the axis grid
grid = gridspec.GridSpec(1, 4)
if enrich:
top_scatter = 0.76
bottom_scatter = 0.585
else:
top_scatter = 0.5
bottom_scatter = 0.1
grid.update(left=0.08, bottom=bottom_scatter, top=top_scatter, right=0.98, hspace=0, wspace=0.15)
# Put an axis in each of the spots on the grid
ax_list = []
for g_loc in grid:
ax_list += [ plt.Subplot(big_fig, g_loc) ]
big_fig.add_subplot(ax_list[-1])
#==== BRAIN DATA ===============================
# Make a list of the file names for the left lateral image
left_lat_fname_list = [ os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'PLS1_with99s_lh_pial_classic_lateral.png'),
os.path.join(results_dir,
'COVARS_{}'.format(covars_name),
'PNGS',
'PLS2_with99s_lh_pial_classic_lateral.png') ]
# List the var names that will be used to get the axis labels
# and min/max values
var_name_list = [ 'PLS1', 'PLS2' ]
# List the colorbar names
cmap_name_list = [ 'RdBu_r', 'RdBu_r' ]
#===== TWO SCATTER PLOTS FOR EACH PLS RESULT ==========
mri_measure_list = [ 'CT_regional_corr_age_c14',
'MT_projfrac+030_regional_corr_age_c14',
'CT_regional_corr_age_m',
'MT_projfrac+030_regional_corr_age_m' ]
# Loop over the two PLS scores and their associated genes
for i, (left_lat_fname,
var_name,
cmap_name) in enumerate(zip(left_lat_fname_list,
var_name_list,
cmap_name_list)):
#==== BRAIN IMAGES ======================================
# Plot the braaaaains
f_list = [ left_lat_fname,
left_lat_fname.replace('lh_pial_classic_lateral', 'lh_pial_classic_medial') ]
grid = gridspec.GridSpec(1,2)
if enrich:
top_brains = 1.06
bottom_brains = 0.76
else:
top_brains = 1.06
bottom_brains = 0.55
grid.update(left=0 + (i*0.5),
right=0.5 + (i*0.5),
bottom=bottom_brains,
top=top_brains,
wspace=0,
hspace=0)
# Put the four brains in a row
big_fig = add_four_hor_brains(grid, f_list, big_fig)
# Add a colorbar
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left=0.05 + (i*0.5),
right=0.45 + (i*0.5),
bottom=bottom_brains+0.05,
top=bottom_brains+0.06,
wspace=0,
hspace=0)
big_fig = add_colorbar(cb_grid[0], big_fig,
cmap_name=cmap_name,
cbar_min=min_max_dict['{}_CBAR_min'.format(var_name)],
cbar_max=min_max_dict['{}_CBAR_max'.format(var_name)],
y_min=min_max_dict['{}_CBAR_min'.format(var_name)],
y_max=min_max_dict['{}_CBAR_max'.format(var_name)],
label=axis_label_dict[var_name])
#===== CORR W MRI ============================
gene_indices = measure_dict['308']['gene_indices']
color='k'
mri_var_name = mri_measure_list[i*2]
for j, mri_var_name in enumerate(mri_measure_list[(2*i):(2*i)+2]):
ax_list[j+(2*i)] = pretty_scatter(sub_dict[mri_var_name][gene_indices],
sub_dict[var_name],
x_label=axis_label_dict[mri_var_name],
y_label=axis_label_dict[var_name],
x_min=min_max_dict['{}_min'.format(mri_var_name)],
x_max=min_max_dict['{}_max'.format(mri_var_name)],
y_min=min_max_dict['{}_min'.format(var_name)],
y_max=min_max_dict['{}_max'.format(var_name)],
color=color,
marker_size=40,
ax=ax_list[j+(2*i)],
figure=big_fig)
for i, ax in enumerate(ax_list):
# Make sure y axis is in scientific format
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
if i in [ 0, 2 ]:
ax.yaxis.set_label_coords(-0.23, 0.5)
else:
# Remove y label and ticklabels altogether
ax.yaxis.set_label_text('')
ax.yaxis.set_ticklabels([])
if i == 1:
pos = ax.get_position()
pos.x0 = pos.x0 - 0.02
pos.x1 = pos.x1 - 0.02
ax.set_position(pos)
if i == 2:
pos = ax.get_position()
pos.x0 = pos.x0 + 0.02
pos.x1 = pos.x1 + 0.02
ax.set_position(pos)
if i == 2 :
# Make sure there aren't too many bins
# for the delta CT plot
ax.locator_params(axis='x', nbins=3)
if enrich:
#=========================================================================
# GO Results
grid = gridspec.GridSpec(1, 1)
grid.update(left=0, bottom=0, top=0.53, right=1, wspace=0, hspace=0)
ax = plt.Subplot(big_fig, grid[0])
big_fig.add_subplot(ax)
f_name = os.path.join(data_dir, 'Fig3_Enrich_withColourBar.png')
img = mpimg.imread(f_name)
ax.imshow(img[5:-5, 5:-5, :], interpolation='none')
ax.axis('off')
#====== PANEL LABELS ==================================
big_ax = big_fig.add_subplot(111)
pos = big_ax.get_position()
pos.x0 = 0
pos.x1 = 1
pos.y0 = 0
pos.y1 = 1
big_ax.set_position(pos)
# Turn off the big axis
# You'll use it though to show
# the panel labels
big_ax.axis('off')
if enrich:
posA = 0.96
posB = 0.74
else:
posA = 0.93
posB = 0.46
for i, letter in enumerate([ 'a', 'd' ]):
big_ax.text(0.01 + (0.5 * i),
posA,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=40,
transform=big_ax.transAxes,
weight='bold')
for i, letter in enumerate([ 'b', 'e' ]):
big_ax.text(0.26 + (0.49*i),
posB,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=40,
transform=big_ax.transAxes,
weight='bold')
for i, letter in enumerate([ 'c', 'f' ]):
big_ax.text(0.3 + (0.49*i),
posB,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=40,
transform=big_ax.transAxes,
weight='bold')
if enrich:
big_ax.text(0.05,
0.48,
'g',
horizontalalignment='left',
verticalalignment='bottom',
fontsize=40,
transform=big_ax.transAxes,
weight='bold')
# Save the figure
output_dir = os.path.join(figures_dir, 'COVARS_{}'.format(covars_name))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = os.path.join(output_dir, 'Figure3.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
rescale(filename, suff='jpg')
plt.close()
def figure_4(measure_dict, graph_dict, figures_dir, results_dir, mpm='MT', rich_club=False, covars_name='none'):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
# Define the sub_dict
sub_dict = measure_dict['308']['COVARS_{}'.format(covars_name)]
sub_dict['Degree'] = measure_dict['308']['Graph_measures']['Degree_CT_ALL_COVARS_ONES_COST_10']
sub_dict['Closeness'] = measure_dict['308']['Graph_measures']['Closeness_CT_ALL_COVARS_ONES_COST_10']
# Get the set values
min_max_dict = get_min_max_values(sub_dict)
axis_label_dict = get_axis_label_dict()
# Create the big figure
big_fig, big_ax = plt.subplots(figsize=(23, 16), facecolor='white')
big_ax.axis('off')
# Create the grid
grid = gridspec.GridSpec(1, 2)
bottom = 0.57
top = 0.98
grid.update(left=0.05, right=0.95, bottom=bottom, top=top, wspace=0.15, hspace=0)
ax_list = []
for g_loc in grid:
ax = plt.Subplot(big_fig, g_loc)
big_fig.add_subplot(ax)
ax_list += [ax]
#======= ANATOMICAL NETWORKS ========================
G = graph_dict['CT_ALL_COVARS_ONES_COST_10']
G_02 = graph_dict['CT_ALL_COVARS_ONES_COST_02']
node_size_dict = { 'Degree' : 16*sub_dict['Degree'],
'Closeness' : 1500*sub_dict['Closeness'] }
if rich_club:
rich_edges, rich_nodes = rich_edges_nodes(G, thresh=85)
else:
rich_nodes = []
cmap_dict = { 'Degree' : 'Reds' ,
'Closeness' : 'Greens' }
for i, network_measure in enumerate([ 'Degree', 'Closeness' ]):
network_measure_key = '{}_CT_ALL_COVARS_ONES_COST_10'.format(network_measure)
network_measure_min = min_max_dict['{}_CBAR_min'.format(network_measure)]
network_measure_max = min_max_dict['{}_CBAR_max'.format(network_measure)]
ax_list[i] = plot_anatomical_network(G,
measure_dict['308']['Graph_measures'],
centroids=measure_dict['308']['centroids'],
measure=network_measure_key,
orientation='sagittal',
cmap_name=cmap_dict[network_measure],
vmin=network_measure_min,
vmax=network_measure_max,
node_size_list=node_size_dict[network_measure],
rc_node_list=rich_nodes,
edge_list=[],
ax=ax_list[i],
continuous=True)
ax_list[i] = plot_anatomical_network(G_02,
measure_dict['308']['Graph_measures'],
centroids=measure_dict['308']['centroids'],
measure=network_measure_key,
orientation='sagittal',
node_list=[],
edge_width=0.8,
ax=ax_list[i])
# Add a colorbar
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left= 0.1 + (i*0.5),
right=0.4 + (i*0.5),
bottom=0.54,
top=0.55,
wspace=0,
hspace=0)
big_fig = add_colorbar(cb_grid[0], big_fig,
cmap_name=cmap_dict[network_measure],
cbar_min=network_measure_min,
cbar_max=network_measure_max,
y_min=network_measure_min,
y_max=network_measure_max,
label=axis_label_dict[network_measure])
#=========================================================================
# Finally put scatter plots of deltaCT, and deltaMT and PLS2 by the network
# measure in the bottom row
#=========================================================================
grid = gridspec.GridSpec(1, 3)
bottom = 0.1
top = 0.45
grid.update(bottom=bottom, top=top, left=0.07, right=0.93, hspace=0.1, wspace=0.1)
ax_list_left = []
ax_list_right = []
for g_loc in grid:
ax = plt.Subplot(big_fig, g_loc)
big_fig.add_subplot(ax)
ax_list_left += [ax]
ax_r = ax.twinx()
ax_list_right += [ax_r]
network_measure_left = 'Degree'
network_measure_left_min = min_max_dict['{}_min'.format(network_measure_left)]
network_measure_left_max = min_max_dict['{}_max'.format(network_measure_left)]
y_label_left = axis_label_dict[network_measure_left]
y_data_left = sub_dict[network_measure_left]
network_measure_right = 'Closeness'
network_measure_right_min = min_max_dict['{}_min'.format(network_measure_right)]
network_measure_right_max = min_max_dict['{}_max'.format(network_measure_right)]
y_label_right = axis_label_dict[network_measure_right]
y_data_right = sub_dict[network_measure_right]
measure_list = [ 'CT_regional_corr_age_m',
'{}_projfrac+030_regional_corr_age_m'.format(mpm),
'PLS2' ]
for i, measure in enumerate(measure_list):
# Set the x and y data
x_data = sub_dict[measure]
# Mask the network values if you're looking at PLS2
if measure == 'PLS2':
gene_indices = measure_dict['308']['gene_indices']
y_data_left = y_data_left[gene_indices]
y_data_right = y_data_right[gene_indices]
# Get the appropriate min, max and label values
# for the y axis
measure_min = min_max_dict['{}_min'.format(measure)]
measure_max = min_max_dict['{}_max'.format(measure)]
x_label = axis_label_dict[measure]
ax = ax_list_left[i]
ax_r = ax_list_right[i]
# Set the color from the colormap above
left_cmap = plt.get_cmap(cmap_dict[network_measure_left])
left_color = left_cmap(0.75)
right_cmap = plt.get_cmap(cmap_dict[network_measure_right])
right_color = right_cmap(0.75)
ax = pretty_scatter(x_data,
y_data_left,
x_label=x_label,
y_label=y_label_left,
x_min=measure_min, x_max=measure_max,
y_min=network_measure_left_min,y_max=network_measure_left_max,
color=left_color,
marker_size=60,
marker='o',
ax=ax,
figure=big_fig,
y0_line=False)
ax.yaxis.set_label_coords(-0.12, 0.5)
ax_r = pretty_scatter(x_data,
y_data_right,
x_label=x_label,
y_label=y_label_right,
x_min=measure_min, x_max=measure_max,
y_min=network_measure_right_min,y_max=network_measure_right_max,
color=right_color,
marker_size=70,
marker='^',
ax=ax_r,
figure=big_fig,
despine_right=False,
y0_line=False)
ax_r.yaxis.set_label_coords(1.2, 0.5)
#====== REMOVE AXIS LABELS ==================================
for ax in ax_list_left[1:] + ax_list_right[:-1]:
ax.yaxis.set_label_text('')
ax.yaxis.set_ticklabels([])
#====== PANEL LABELS ==================================
big_ax = big_fig.add_subplot(111)
pos = big_ax.get_position()
pos.x0 = 0
pos.x1 = 1
pos.y0 = 0
pos.y1 = 1
big_ax.set_position(pos)
# Turn off the big axis
# You'll use it though to show
# the panel labels
big_ax.axis('off')
for i, letter in enumerate(['a', 'b']):
big_ax.text(0.02 + (0.5 * i),
0.92,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=45,
transform=big_ax.transAxes,
weight='bold')
for i, letter in enumerate([ 'c' ]):
big_ax.text(0.035,
0.43,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=45,
transform=big_ax.transAxes,
weight='bold')
for i, letter in enumerate([ 'd', 'e' ]):
big_ax.text(0.38 + (0.295625 * i),
0.43,
letter,
horizontalalignment='left',
verticalalignment='bottom',
fontsize=45,
transform=big_ax.transAxes,
weight='bold')
#=========================================================================
# And finally clean everything up and save the figure
#=========================================================================
# Save the figure
output_dir = os.path.join(figures_dir, 'COVARS_{}'.format(covars_name))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = os.path.join(output_dir, 'Figure4.png')
big_fig.savefig(filename, bbox_inches=0, dpi=100)
rescale(filename, suff='jpg')
plt.close()
def calc_min_max(x, pad=0.05):
'''
Find min and max values such that
all the data lies within roughly 90% of
the axis range (pad is added at each end)
'''
try:
r = np.max(x) - np.min(x)
if r > 0:
x_min = np.min(x) - pad * r
x_max = np.max(x) + pad * r
else:
x_min = np.mean(x)
x_max = np.mean(x)
except:
x_min = np.nan
x_max = np.nan
return x_min, x_max
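# A quick worked example of the padding logic (values are illustrative):
#
#     calc_min_max([0.0, 1.0], pad=0.05)   # returns (-0.05, 1.05)
#
# so the data span (1.0) covers 1.0/1.1, roughly 91%, of the padded axis range.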
def get_min_max_values(measure_dict, gene_indices=None):
'''
These are the appropriate min and max values for the
discovery cohort
'''
min_max_dict = {}
for measure_name, measure_data in measure_dict.items():
measure_min, measure_max = calc_min_max(measure_data, pad=0.05)
min_max_dict['{}_min'.format(measure_name)] = measure_min
min_max_dict['{}_max'.format(measure_name)] = measure_max
min_max_dict['CT_regional_corr_age_m_CBAR_min'] = -0.03
min_max_dict['CT_regional_corr_age_m_CBAR_max'] = -0.01
#min_max_dict['CT_regional_corr_age_m_Uncorr_CBAR_min'] = -0.03
#min_max_dict['CT_regional_corr_age_m_Uncorr_CBAR_max'] = 0.03
min_max_dict['CT_regional_corr_age_c14_CBAR_min'] = 2.5
min_max_dict['CT_regional_corr_age_c14_CBAR_max'] = 3.5
min_max_dict['MT_projfrac+030_regional_corr_age_m_CBAR_min'] = 0.002
min_max_dict['MT_projfrac+030_regional_corr_age_m_CBAR_max'] = 0.007
min_max_dict['MT_projfrac+030_regional_corr_age_c14_CBAR_min'] = 0.8
min_max_dict['MT_projfrac+030_regional_corr_age_c14_CBAR_max'] = 1.0
min_max_dict['PLS1_CBAR_min'] = -0.07
min_max_dict['PLS1_CBAR_max'] = 0.07
min_max_dict['PLS2_CBAR_min'] = -0.07
min_max_dict['PLS2_CBAR_max'] = 0.07
min_max_dict['PLS1_usable_CBAR_min'] = -0.07
min_max_dict['PLS1_usable_CBAR_max'] = 0.07
min_max_dict['PLS2_usable_CBAR_min'] = -0.07
min_max_dict['PLS2_usable_CBAR_max'] = 0.07
min_max_dict['MT_all_mean_min'] = 0.4
min_max_dict['MT_all_mean_max'] = 1.8
min_max_dict['MT_regional_corr_age_m_min'] = -0.008
min_max_dict['MT_regional_corr_age_m_max'] = 0.016
min_max_dict['MT_regional_corr_age_m_CBAR_min'] = -0.007
min_max_dict['MT_regional_corr_age_m_CBAR_max'] = 0.007
min_max_dict['MT_regional_corr_age_c14_min'] = 0.4
min_max_dict['MT_regional_corr_age_c14_max'] = 1.8
min_max_dict['MT_regional_corr_age_c14_CBAR_min'] = 0.4
min_max_dict['MT_regional_corr_age_c14_CBAR_max'] = 1.8
min_max_dict['MT_all_slope_ct_min'] = -5.5
min_max_dict['MT_all_slope_ct_max'] = 2.2
min_max_dict['MT_all_slope_age_vs_mbp_min'] = -0.002
min_max_dict['MT_all_slope_age_vs_mbp_max'] = -0.0006
min_max_dict['MT_all_slope_age_at14_vs_mbp_min'] = 0.01
min_max_dict['MT_all_slope_age_at14_vs_mbp_max'] = 0.08
min_max_dict['Degree_CBAR_min'] = 10
min_max_dict['Degree_CBAR_max'] = 60
min_max_dict['AverageDist_CBAR_min'] = 20
min_max_dict['AverageDist_CBAR_max'] = 70
min_max_dict['Closeness_CBAR_min'] = 0.4
min_max_dict['Closeness_CBAR_max'] = 0.5
return min_max_dict
def get_axis_label_dict():
axis_label_dict = {}
axis_label_dict['Degree'] = 'Degree'
axis_label_dict['von_economo'] = 'Cortical Lamination Pattern'
axis_label_dict['PC'] = 'Participation Coefficient'
axis_label_dict['AverageDist'] = 'Average Distance (mm)'
axis_label_dict['Clustering'] = 'Clustering'
axis_label_dict['Closeness'] = 'Closeness'
axis_label_dict['InterhemProp'] = 'Interhemispheric Connections'
axis_label_dict['CT_regional_corr_age_c14'] = 'CT at 14 yrs (mm)'
axis_label_dict['CT_regional_corr_age_m'] = r'$\Delta$CT (mm/year)'
axis_label_dict['MT_projfrac+030_regional_corr_age_c14'] = 'MT at 14 yrs (PU)'
axis_label_dict['MT_projfrac+030_regional_corr_age_m'] = r'$\Delta$MT (PU/year)'
axis_label_dict['age_scan'] = 'Age (years)'
axis_label_dict['CT_global_mean'] = 'Global CT (mm)'
axis_label_dict['MT_projfrac+030_global_mean'] = 'Global MT (PU)'
axis_label_dict['MT_all_mean'] = 'Mean MT across regions (PU)'
axis_label_dict['MT_all_slope_ct'] = r'$\Delta$MT with CT (PU/mm)'
axis_label_dict['MT_all_slope_age'] = r'$\Delta$MT with age (PU/year)'
axis_label_dict['MT_regional_corr_age_c14'] = 'MT at 14 yrs (PU)'
axis_label_dict['MT_regional_corr_age_m'] = r'$\Delta$MT (PU/year)'
axis_label_dict['mbp'] = 'Myelin Basic Protein'
axis_label_dict['cux'] = 'CUX'
axis_label_dict['oligo'] = 'Oligodendrocyte Expr'
axis_label_dict['mbp_usable'] = 'Myelin Basic Protein'
axis_label_dict['cux_usable'] = 'CUX'
axis_label_dict['oligo_usable'] = 'Oligodendrocyte Expr'
axis_label_dict['x'] = 'X coordinate'
axis_label_dict['y'] = 'Y coordinate'
axis_label_dict['z'] = 'Z coordinate'
axis_label_dict['PLS1'] = 'PLS 1 scores'
axis_label_dict['PLS2'] = 'PLS 2 scores'
axis_label_dict['PLS1_usable'] = 'PLS 1 scores'
axis_label_dict['PLS2_usable'] = 'PLS 2 scores'
axis_label_dict['MT_all_slope_age_at14_vs_mbp'] = 'MT at 14 years\nvs MBP'
    axis_label_dict['MT_all_slope_age_vs_mbp'] = r'$\Delta$MT with age' + '\nvs MBP'
return axis_label_dict
def corr_by_agebin(measure_dict_dict, paper_dir, x_measure='Degree_CT_covar_ones_all_COST_10', y_measure='CT_all_slope_age', ax=None, fig=None, label=None):
y = np.array(measure_dict_dict['COMPLETE_EXCLBAD'][y_measure])
m_array = np.zeros(5)
r_array = np.zeros(5)
p_array = np.zeros(5)
for i, age_bin in enumerate(range(1,6)):
cohort = 'AGE_BIN_{}_EXCLBAD'.format(age_bin)
print(cohort)
measure_dict = measure_dict_dict[cohort]
x = np.array(measure_dict[x_measure])
m,c,r,p,sterr,p_perm = permutation_correlation(x, y)
m_array[i] = m
r_array[i] = r
p_array[i] = p
if not ax:
fig, ax = plt.subplots()
ax.plot(range(1,6), m_array, c='b')
ax.scatter(range(1,6), m_array, s=70, c='b')
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
if label:
ax.set_ylabel(label)
ax.set_xticklabels(['', '14-15', '16-17', '18-19', '20-21', '22-24'], rotation=45)
sns.despine()
return ax
def get_circular_layout(G, df):
# Create two empty dictionaries for the
# positions and the normal angle to each
# position (in degrees)
pos_dict = {}
theta_dict = {}
# Make a list of theta values that
# start at 90 and go round the circle
# in a clockwise direction
theta_list = [ t%360 for t in np.arange(450, 90, -360.0/len(df['node'])) ]
# And then fill in those dictionaries!
for i, key in enumerate(df['node'].values):
theta = theta_list[i] * np.pi / 180.0
pos_dict[key] = np.array([np.cos(theta)*0.5, np.sin(theta)*0.5])
theta_dict[key] = theta_list[i]
return pos_dict, theta_dict
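# Usage sketch for get_circular_layout (the four-node graph and data frame
# below are hypothetical, purely for illustration):
#
#   G_demo = nx.complete_graph(4)
#   df_demo = pd.DataFrame({'node': range(4)})
#   pos, theta = get_circular_layout(G_demo, df_demo)
#   # pos maps each node to (x, y) on a circle of radius 0.5, starting at
#   # 90 degrees (the top) and moving clockwise; theta keeps the matching
#   # angle in degrees, which add_wedge uses to draw the outer ring.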
def setup_color_list(df, cmap_name='jet', sns_palette=None, measure='module', continuous=False, vmax=1, vmin=0):
'''
Use a colormap to set colors for each value in the
sort_measure and return a list of colors for each node
'''
import matplotlib as mpl
colors_dict = {}
# Figure out how many different colors you need
    n = float(len(set(df[measure])))
# FOR CONTINUOUS DATA
if continuous:
cNorm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
scalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=cmap_name)
colors_list = [ scalarMap.to_rgba(x) for x in df[measure] ]
# FOR DISCRETE DATA
else:
# Option 1: If you've passed a matplotlib color map
if type(cmap_name) is str:
cmap = plt.get_cmap(cmap_name)
else:
cmap = cmap_name
for i, mod in enumerate(sorted(set(df[measure]))):
colors_dict[mod] = cmap((i+0.5)/n)
# Option 2: If you've passed a sns_color_palette
# (only designed to work with discrete variables)
    if sns_palette is not None and not continuous:
        color_palette = sns.palettes.color_palette(sns_palette, int(n))
for i, mod in enumerate(sorted(set(df[measure]))):
colors_dict[mod] = color_palette[i]
colors_list = [ colors_dict[mod] for mod in df[measure].values ]
return colors_list
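# Minimal sketch of setup_color_list with a made-up data frame (the column
# values below are hypothetical):
#
#   df_demo = pd.DataFrame({'module': [1, 1, 2, 3]})
#   colors = setup_color_list(df_demo, cmap_name='jet', measure='module')
#   # colors holds one RGBA tuple per row, with rows in the same module
#   # sharing a colour; pass continuous=True together with vmin/vmax when
#   # the measure is continuous rather than categorical.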
def plot_circular_network(G, measure_dict, sort_measure='module', wedge_measure='von_economo', sort_cmap_name='jet_r', wedge_cmap_name='von_economo', node_size=500, edge_list=None, edge_color='k', edge_width=0.2, figure=None, ax=None, show_wedge=False):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
if not edge_list:
edge_list = G.edges()
# Put the measures you care about together
# in a data frame
df = pd.DataFrame({ 'degree' : measure_dict['Degree_CT_covar_ones_all_COST_10'] ,
'module' : measure_dict['Module_CT_covar_ones_all_COST_10'],
'renum_module' : measure_dict['Renumbered_Module_CT_covar_ones_all_COST_10'],
'von_economo' : measure_dict['von_economo'],
'lobes' : measure_dict['lobes'],
'x' : measure_dict['centroids'][:,0],
'y' : measure_dict['centroids'][:,1],
'z' : measure_dict['centroids'][:,2]})
df['node'] = range(len(df['degree']))
# First get the module and wedge color lists in node order
# (This has to be done before you sort the data frame)
von_economo_colors = get_von_economo_color_dict(measure_dict['von_economo'])
    if sort_cmap_name == 'von_economo':
        sort_cmap_name = mpl.colors.ListedColormap(list(von_economo_colors.values()))
    if wedge_cmap_name == 'von_economo':
        wedge_cmap_name = mpl.colors.ListedColormap(list(von_economo_colors.values()))
node_colors_list = setup_color_list(df, cmap_name=sort_cmap_name, measure=sort_measure)
wedge_colors_list = setup_color_list(df, cmap_name=wedge_cmap_name, measure=wedge_measure)
# Now sort the df by the measure you care about
df.sort_values(by=[sort_measure, wedge_measure, 'node'], inplace=True)
# Get the positions of node and the normal angle to each position
pos_dict, theta_dict = get_circular_layout(G, df)
# If you've given this code an axis and figure then use those
# otherwise just create your own
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10))
else:
fig = figure
nx.draw_networkx(G,
pos=pos_dict,
node_color=node_colors_list,
node_size=node_size,
edgelist=edge_list,
width=edge_width,
edge_color = edge_color,
with_labels=False,
ax=ax)
if show_wedge:
ax = add_wedge(df, theta_dict, wedge_colors_list, wedge_measure=wedge_measure, ax=ax)
ax.set_xlim(-0.75, 0.75)
ax.set_ylim(-0.75, 0.75)
else:
ax.set_xlim(-0.6, 0.6)
ax.set_ylim(-0.6, 0.6)
ax.axis('off')
return ax
def add_wedge(df, theta_dict, wedge_colors_list, wedge_measure='von_economo', ax=None):
theta_adj = 360.0/(2*len(df['node']))
    df.sort_values(by=['node'], inplace=True)
for node in df['node'].values:
wedge = mpatches.Wedge((0,0),
r = 0.65, width = 0.1,
theta1=theta_dict[node]-theta_adj,
theta2=theta_dict[node]+theta_adj,
facecolor=wedge_colors_list[node],
edgecolor='none')
ax.add_patch(wedge)
return ax
def plot_anatomical_network(G, NodalMeasures_file, measure='module', orientation='sagittal', cmap_name='jet_r', continuous=False, vmax=None, vmin=None, sns_palette=None, edge_list=None, edge_color='k', edge_width=0.2, node_list=None, rc_node_list=[], node_shape='o', rc_node_shape='s', node_size=500, node_size_list=None, figure=None, ax=None):
'''
Plots each node in the graph in one of three orientations
(sagittal, axial or coronal).
The nodes are sorted according to the measure given
(default value: module) and then plotted in that order.
'''
if edge_list is None:
edge_list = list(G.edges())
if node_list is None:
node_list = G.nodes()
node_list = sorted(node_list)
# Put the measures you care about together
# in a data frame
fields = ['degree','module','closeness','x','y','z']
if measure not in fields:
fields.append(measure)
df = pd.read_csv(NodalMeasures_file, skipinitialspace=True, usecols=fields)
# Add in a node index which relates to the node names in the graph
df['node'] = range(len(df['degree']))
# Then use these node values to get the appropriate positions for each node
pos_dict = {}
pos_dict['axial'], pos_dict['sagittal'], pos_dict['coronal'] = get_anatomical_layouts(G, df)
pos = pos_dict[orientation]
# Create a colors_list for the nodes
colors_list = setup_color_list(df,
cmap_name=cmap_name,
sns_palette=sns_palette,
measure=measure,
vmin=vmin,
vmax=vmax,
continuous=continuous)
# If the node size list is none then
# it'll just be the same size for each node
if node_size_list is None:
node_size_list = [ node_size ] * len(df['degree'])
# If you have no rich club nodes then all the nodes will
# have the same shape
node_shape_list = [ node_shape ] * len(df['degree'])
# If you have set rich nodes then you'll need to replace
# those indices with the rc_node_shape
for i in rc_node_list:
        node_shape_list[i] = rc_node_shape
# We're going to figure out the best way to plot these nodes
# so that they're sensibly on top of each other
sort_dict = {}
sort_dict['axial'] = 'z'
sort_dict['coronal'] = 'y'
sort_dict['sagittal'] = 'x'
node_order = np.argsort(df[sort_dict[orientation]]).values
# Now remove all the nodes that are not in the node_list
node_order = [ x for x in node_order if x in node_list ]
# If you've given this code an axis and figure then use those
# otherwise just create your own
if not ax:
# Create a figure
fig_size_dict = {}
fig_size_dict['axial'] = (9,12)
fig_size_dict['sagittal'] = (12,8)
fig_size_dict['coronal'] = (9,8)
fig, ax = plt.subplots(figsize=fig_size_dict[orientation])
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2)
else:
fig = figure
# Start by drawing in the edges:
nx.draw_networkx_edges(G,
pos=pos,
edgelist=edge_list,
width=edge_width,
edge_color=edge_color,
ax=ax)
# And then loop through each node and add it in order
for node in node_order:
nx.draw_networkx_nodes(G,
pos=pos,
node_color=colors_list[node],
node_shape=node_shape_list[node],
node_size=node_size_list[node],
nodelist=[node],
ax=ax)
axis_limits_dict = {}
axis_limits_dict['axial'] = [ -70, 70, -105, 70]
axis_limits_dict['coronal'] = [ -70, 70, -45, 75 ]
axis_limits_dict['sagittal'] = [ -105, 70, -45, 75 ]
ax.set_xlim(axis_limits_dict[orientation][0],axis_limits_dict[orientation][1])
ax.set_ylim(axis_limits_dict[orientation][2],axis_limits_dict[orientation][3])
ax.axis('off')
return ax
def get_anatomical_layouts(G, df):
'''
This code takes in a data frame that has x, y, z coordinates and
integer node labels (0 to n-1) for n nodes and returns three dictionaries
containing appropriate pairs of coordinates for sagittal, coronal and
axial slices.
'''
axial_dict = {}
sagittal_dict = {}
coronal_dict = {}
for node in df['node'].values:
axial_dict[node] = np.array([df['x'].loc[df['node']==node].values[0],
df['y'].loc[df['node']==node].values[0]])
coronal_dict[node] = np.array([df['x'].loc[df['node']==node].values[0],
df['z'].loc[df['node']==node].values[0]])
sagittal_dict[node] = np.array([df['y'].loc[df['node']==node].values[0],
df['z'].loc[df['node']==node].values[0]])
return axial_dict, sagittal_dict, coronal_dict
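# Sketch of the expected input/output of get_anatomical_layouts (the
# coordinates below are hypothetical):
#
#   df_demo = pd.DataFrame({'node': [0, 1],
#                           'x': [-40.0, 42.0],
#                           'y': [10.0, -65.0],
#                           'z': [5.0, 30.0]})
#   axial, sagittal, coronal = get_anatomical_layouts(G, df_demo)
#   # axial[0]    -> array([-40., 10.])  (x, y)
#   # coronal[0]  -> array([-40.,  5.])  (x, z)
#   # sagittal[0] -> array([ 10.,  5.])  (y, z)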
def set_conn_types(G, G_edge=None, thresh=75):
    if G_edge is None:
G_edge = G
# Figure out the degrees from the main graph (G)
    deg = dict(G.degree())
    # Now calculate the threshold that you're going
    # to use to designate a node as a hub or not
    hub_thresh = np.percentile(list(deg.values()), thresh)
# Loop through the edges of the G_edge graph and
# assign the connection type as 2 (hub-hub),
# 1 (hub-peripheral; feeder) or 0 (peripheral-peripheral)
for node1, node2 in G_edge.edges():
        if deg[node1] > hub_thresh and deg[node2] > hub_thresh:
            G_edge[node1][node2]['conn_type'] = 2
        elif deg[node1] > hub_thresh or deg[node2] > hub_thresh:
            G_edge[node1][node2]['conn_type'] = 1
        else:
            G_edge[node1][node2]['conn_type'] = 0
# Return G_edge
return G_edge
def rich_edges_nodes(G, thresh=75):
# Figure out the degrees from the main graph (G)
    deg = dict(G.degree())
    # Now calculate the threshold that you're going
    # to use to designate a node as a hub or not
    hub_thresh = np.percentile(list(deg.values()), thresh)
G = set_conn_types(G, thresh=thresh)
rich_edges = [ (node1, node2) for node1, node2 in G.edges() if G[node1][node2]['conn_type']==2 ]
rich_nodes = [ node for node in G.nodes() if deg[node] > hub_thresh ]
return rich_edges, rich_nodes
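# Usage sketch for the hub helpers above, on a hypothetical random graph
# (the graph and threshold are arbitrary, for illustration only):
#
#   G_demo = nx.erdos_renyi_graph(50, 0.2, seed=42)
#   rich_edges, rich_nodes = rich_edges_nodes(G_demo, thresh=75)
#   # rich_nodes: nodes whose degree is above the 75th percentile
#   # rich_edges: edges whose two endpoints are both such hubs
#   # (set_conn_types marks edges as 2 = hub-hub, 1 = feeder, 0 = peripheral)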
def figure_1_replication(measure_dict_D, measure_dict_V, three_cohorts_dir):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2.5)
# Get the set values
min_max_dict_D = get_min_max_values(measure_dict_D)
min_max_dict_V = get_min_max_values(measure_dict_V)
axis_label_dict = get_axis_label_dict()
# Create the big figure
big_fig, ax_list = plt.subplots(1,4, figsize=(40, 8), facecolor='white')
measure_list = ['CT_all_slope_age_at14',
'CT_all_slope_age',
'MT_projfrac+030_all_slope_age_at14',
'MT_projfrac+030_all_slope_age']
for i, measure in enumerate(measure_list):
ax = ax_list.reshape(-1)[i]
DV_min = np.min([min_max_dict_D['{}_min'.format(measure)],
min_max_dict_V['{}_min'.format(measure)]])
DV_max = np.max([min_max_dict_D['{}_max'.format(measure)],
min_max_dict_V['{}_max'.format(measure)]])
if DV_max - DV_min < 0.1:
mul=100
exp = 'x10-2'
else:
mul=1
exp=''
        # Put a linear regression for Discovery vs Validation
ax = pretty_scatter(measure_dict_D[measure]*mul,
measure_dict_V[measure]*mul,
x_label='Discovery',
y_label='Validation',
x_min=DV_min*mul, x_max=DV_max*mul,
y_min=DV_min*mul, y_max=DV_max*mul,
marker_size=60,
ax=ax,
figure=big_fig)
# Add a unity line
ax.plot([DV_min*mul, DV_max*mul], [DV_min*mul, DV_max*mul], linestyle='--', color='k')
# Put a title on the subplot
title = axis_label_dict[measure].split(' (')[0]
if not title.endswith('yrs'):
title = '{} with age'.format(title)
ax.set_title(title)
for ax in ax_list[1:]:
ax.set_ylabel('')
plt.tight_layout()
big_fig.savefig(os.path.join(three_cohorts_dir, 'Replication_Figure1.png'), bbox_inches=0, dpi=100)
plt.close(big_fig)
def figure_4_replication(measure_dict_D, measure_dict_V, three_cohorts_dir):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=2.5)
# Get the set values
min_max_dict_D = get_min_max_values(measure_dict_D)
min_max_dict_V = get_min_max_values(measure_dict_V)
axis_label_dict = get_axis_label_dict()
# Define the measures you care about
measure_list = ['Degree', 'Closeness', 'AverageDist', 'Clustering' ]
# Create the big figure
big_fig, ax_list = plt.subplots(1,len(measure_list), figsize=(30, 8), facecolor='white')
for i, measure in enumerate(measure_list):
measure_name = '{}_CT_covar_ones_all_COST_10'.format(measure)
ax = ax_list.reshape(-1)[i]
DV_min = np.min([min_max_dict_D['{}_min'.format(measure_name)],
min_max_dict_V['{}_min'.format(measure_name)]])
DV_max = np.max([min_max_dict_D['{}_max'.format(measure_name)],
min_max_dict_V['{}_max'.format(measure_name)]])
        # Put a linear regression for Discovery vs Validation
ax = pretty_scatter(measure_dict_D[measure_name],
measure_dict_V[measure_name],
x_label='Discovery',
y_label='Validation',
x_min=DV_min, x_max=DV_max,
y_min=DV_min, y_max=DV_max,
marker_size=60,
ax=ax,
figure=big_fig)
# Add a unity line
ax.plot([DV_min, DV_max], [DV_min, DV_max], linestyle='--', color='k')
# Put a title on the subplot
title = axis_label_dict[measure].split(' (')[0]
ax.set_title(title)
for ax in ax_list[1:]:
ax.set_ylabel('')
plt.tight_layout()
big_fig.savefig(os.path.join(three_cohorts_dir, 'Replication_Figure4.png'), bbox_inches=0, dpi=100)
plt.close(big_fig)
def results_matrix(measure_dict, covars_name='none', graph='CT_ALL_COVARS_ONES_COST_10', figure_name=None, ax=None, figure=None):
# Get the sub_dict
sub_dict = measure_dict['308']['COVARS_{}'.format(covars_name)]
graph_sub_dict = measure_dict['308']['Graph_measures']
# Make a list of the measures you want to report
# and make sure they're all in sub_dict
measure_list = ['CT_regional_corr_age_c14',
'MT_projfrac+030_regional_corr_age_c14',
'CT_regional_corr_age_m',
'MT_projfrac+030_regional_corr_age_m',
'PLS1_with99s',
'PLS2_with99s',
'Degree',
'Closeness']
sub_dict['Degree'] = graph_sub_dict['Degree_{}'.format(graph)]
sub_dict['Closeness'] = graph_sub_dict['Closeness_{}'.format(graph)]
# Get the variable names
axis_label_dict = get_axis_label_dict()
axis_label_dict['PLS1_with99s'] = axis_label_dict['PLS1']
axis_label_dict['PLS2_with99s'] = axis_label_dict['PLS2']
# Create the figure if you need to
if not ax:
# Create a figure
fig, ax = plt.subplots(figsize=(10, 10), facecolor='white')
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=1.5)
else:
if figure is None:
fig = plt.gcf()
else:
fig = figure
# Make an empty data frame
df = pd.DataFrame()
for measure in measure_list:
        col = axis_label_dict[measure]
        df[col] = sub_dict[measure]
        df.loc[df[col] == -99, col] = np.nan
# Create a mask to show the diagonal and only the lower triangle
mask = np.zeros_like(df.corr())
mask[np.triu_indices_from(mask, k=1)] = True
# Now plot the heatmap
cbar_ax = fig.add_axes([.87, .48, .02, .47])
cbar_ax.text(-0.05,
0.5,
'Pearson correlation coefficient',
rotation=90,
horizontalalignment='right',
verticalalignment='center',
fontsize='x-large')
ax = sns.heatmap(df.corr(), ax=ax, fmt='+2.2f', square=True, cbar_ax=cbar_ax, annot=True, mask=mask)
# Adjust the x labels
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(45)
label.set_ha('right')
if figure_name:
# Do the tight layout because, again, it looks better!
fig.tight_layout()
# And save the figure
fig.savefig(figure_name, bbox_inches=0, dpi=100)
plt.close(fig)
else:
return ax, cbar_ax
def figs_for_talk(measure_dict, results_dir, talk_figs_dir):
# Set the seaborn context and style
sns.set(style="white")
sns.set_context("poster", font_scale=3)
# Get the various min and max values:
min_max_dict = get_min_max_values(measure_dict)
axis_label_dict = get_axis_label_dict()
# Set up the colormap dictionary
cmap_dict = {}
cmap_dict['CT_all_slope_age_at14'] = 'jet'
cmap_dict['CT_all_slope_age'] = 'winter_r'
cmap_dict['CT_all_slope_age_Uncorr'] = 'RdBu_r'
cmap_dict['MT_projfrac+030_all_slope_age_at14'] = 'jet'
cmap_dict['MT_projfrac+030_all_slope_age'] = 'autumn'
cmap_dict['all_slope_age'] = 'RdBu_r'
cmap_dict['all_slope_age_at14'] = 'jet'
cmap_dict['PLS1'] = 'RdBu_r'
cmap_dict['PLS2'] = 'RdBu_r'
# Set up the left_lat dictionary
left_lat_dict = {}
left_lat_dict['CT_all_slope_age_at14'] = os.path.join(results_dir,
'PNGS',
'SlopeAge_at14_CT_lh_pial_classic_lateral.png')
left_lat_dict['CT_all_slope_age'] = os.path.join(results_dir,
'PNGS',
'SlopeAge_FDRmask_CT_lh_pial_classic_lateral.png')
left_lat_dict['CT_all_slope_age_Uncorr'] = os.path.join(results_dir,
'PNGS',
'SlopeAge_Uncorr_CT_lh_pial_classic_lateral.png')
left_lat_dict['MT_projfrac+030_all_slope_age_at14'] = os.path.join(results_dir,
'PNGS',
'SlopeAge_at14_MT_projfrac+030_lh_pial_classic_lateral.png')
left_lat_dict['MT_projfrac+030_all_slope_age'] = os.path.join(results_dir,
'PNGS',
'SlopeAge_FDRmask_MT_projfrac+030_lh_pial_classic_lateral.png')
left_lat_dict['PLS1'] = os.path.join(results_dir,
'PNGS',
'PLS1_lh_pial_classic_lateral.png')
left_lat_dict['PLS2'] = os.path.join(results_dir,
'PNGS',
'PLS2_lh_pial_classic_lateral.png')
# Make the brain images that you need
for measure in [ 'CT_all_slope_age_at14',
'CT_all_slope_age',
'CT_all_slope_age_Uncorr',
'MT_projfrac+030_all_slope_age_at14',
'MT_projfrac+030_all_slope_age',
'PLS1',
'PLS2' ]:
# Set up the figure
fig, ax = plt.subplots(figsize=(20,6), facecolor='white')
# Set up the grid
grid = gridspec.GridSpec(1,4)
grid.update(left=0.01, right=0.99, top=1.05, bottom=0.2, wspace=0, hspace=0)
# Set up the file list
left_lat_fname = left_lat_dict[measure]
f_list = [ left_lat_fname,
left_lat_fname.replace('lh_pial_classic_lateral', 'lh_pial_classic_medial'),
left_lat_fname.replace('lh_pial_classic_lateral', 'rh_pial_classic_medial'),
left_lat_fname.replace('lh_pial_classic_lateral', 'rh_pial_classic_lateral') ]
# Add the brains
fig = add_four_hor_brains(grid, f_list, fig)
# Set up the colorbar grid
cb_grid = gridspec.GridSpec(1,1)
cb_grid.update(left=0.2,
right=0.8,
bottom=0.2,
top=0.25,
wspace=0,
hspace=0)
fig = add_colorbar(cb_grid[0], fig,
cmap_name=cmap_dict[measure],
cbar_min=min_max_dict['{}_CBAR_min'.format(measure)],
cbar_max=min_max_dict['{}_CBAR_max'.format(measure)],
y_min=min_max_dict['{}_CBAR_min'.format(measure)],
y_max=min_max_dict['{}_CBAR_max'.format(measure)],
                           label=axis_label_dict[measure.replace('_Uncorr', '')])
# Turn off the axis
ax.set_axis_off()
# Save the figure
figure_name = os.path.join(talk_figs_dir, '{}_FourHorBrains.png'.format(measure))
fig.savefig(figure_name, dpi=100)
# Close the figure
plt.close('all')
# Make the scatter plots you need
x_list = [ 'age_scan', 'age_scan', 'CT_all_slope_age_at14', 'MT_projfrac+030_all_slope_age_at14' ]
y_list = [ 'CT_global_mean', 'MT_projfrac+030_global_mean', 'CT_all_slope_age', 'MT_projfrac+030_all_slope_age' ]
for x_key, y_key in zip(x_list, y_list):
figure_name = os.path.join(talk_figs_dir, 'Scatter_{}_vs_{}.png'.format(x_key, y_key))
fig, ax = plt.subplots(figsize=(10,7), facecolor='white')
if x_key == 'age_scan':
color_measure = y_key.replace('_global_mean', '_all_slope_age')
stat_key = y_key.replace('_mean', '_slope_age')
color_measure_cmap = cmap_dict[color_measure]
norm = mpl.colors.Normalize(vmin=min_max_dict['{}_CBAR_min'.format(color_measure)],
vmax=min_max_dict['{}_CBAR_max'.format(color_measure)])
cmap_converter = mpl.cm.ScalarMappable(norm=norm, cmap=color_measure_cmap)
color = cmap_converter.to_rgba(measure_dict[stat_key])
else:
color='k'
pretty_scatter(measure_dict[x_key],
measure_dict[y_key],
x_label=axis_label_dict[x_key],
y_label=axis_label_dict[y_key],
x_max=min_max_dict['{}_max'.format(x_key)],
x_min=min_max_dict['{}_min'.format(x_key)],
y_max=min_max_dict['{}_max'.format(y_key)],
y_min=min_max_dict['{}_min'.format(y_key)],
color=color,
figure_name=figure_name,
ax=ax,
figure=fig)
# Now the violin plots
for measure in [ 'all_slope_age_at14', 'all_slope_age']:
mpm='MT'
figure_name = os.path.join(talk_figs_dir, 'Violin_{}.png'.format(measure))
violin_mt_depths(measure_dict,
measure=measure,
y_label=axis_label_dict['{}_{}'.format(mpm, measure)],
cmap=cmap_dict[measure],
y_min=min_max_dict['{}_{}_min'.format(mpm, measure)],
y_max=min_max_dict['{}_{}_max'.format(mpm, measure)],
cmap_min=min_max_dict['{}_{}_CBAR_min'.format(mpm, measure)],
cmap_max=min_max_dict['{}_{}_CBAR_max'.format(mpm, measure)],
lam_labels=False,
mpm=mpm,
vert=False,
cbar=True,
figure_name=figure_name)
# Close the figure
plt.close('all')
def read_in_rich_club(RichClub_file):
df = pd.read_csv(RichClub_file)
deg = list(df.pop('degree').values)
rc = list(df.pop('real graph').values)
    return deg, rc, df.values
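# Sketch of how the rich-club file is consumed (the file name is hypothetical;
# the CSV is expected to hold a 'degree' column, a 'real graph' column and one
# column per randomised comparison graph):
#
#   deg, rc, rc_rand = read_in_rich_club('RichClub.csv')
#   # deg     : degree values (x axis)
#   # rc      : rich-club coefficient of the real graph at each degree
#   # rc_rand : matrix of rich-club coefficients for the random graphs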
def network_summary_fig(corrmat_file, NodalMeasures_file, GlobalMeasures_file, RichClub_file, figures_dir):
M = np.loadtxt(corrmat_file)
G = mg.graph_at_cost(M, 10)
G_02 = mg.graph_at_cost(M, 2)
network_measures_dict = | pd.read_csv(GlobalMeasures_file) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Soundclim Utilities
Module with functions useful to compile datasets for SoundClim
VERSION 0.2
Created on Feb 2019 (version 0.1)
Updated on May 2019 (version 0.2)
Updated on July 2019 (version 0.3):
bug on batch_feature_rois solved by removing db_range parameter
@author: <EMAIL>
"""
import pandas as pd
import numpy as np
from os import listdir
import time
import joblib
import matplotlib.pyplot as plt
from sklearn import manifold, preprocessing
from librosa.core import resample
from maad.rois import find_rois_cwt
from maad import sound
from maad.features import shape_features, centroid, opt_shape_presets, compute_rois_features
from maad.util import format_rois, rois_to_imblobs, normalize_2d
def visual_rois(xdata, idx_highlight=None, perplexity=50, plot=True):
"""
Computes a 2D visualization of the rois shape features space with t-SNE
Note: the function only selects shape features since they are homogeneous
Parameters:
----------
xdata: pandas dataframe
A data frame with shape ('shp') columns
idx_highlight: ndarray
Boolean array with indices to be highlighted on the image
Returns:
-------
tsne: float
Transformed data by tsne algorithm in a 2D space
"""
# select features
X = xdata
# compute tsne
    time_start = time.perf_counter()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0,
verbose=1, perplexity=perplexity)
Y = tsne.fit_transform(X)
    time_elapsed = (time.perf_counter() - time_start)
print('\nShape features transformed with t-SNE in', np.round(time_elapsed,3), 's')
if plot is True:
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=[8,8])
ax.scatter(Y[:, 0], Y[:, 1], s = 10, alpha=0.5, c='#1f77b4')
ax.set_xlabel('x-tsne')
ax.set_ylabel('y-tsne')
        if idx_highlight is not None:
ax.scatter(Y[idx_highlight, 0], Y[idx_highlight, 1], s = 20,
alpha=0.5, color='darkorange')
else:
pass
else:
pass
return Y
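# Minimal sketch of visual_rois on random data (purely illustrative; a real
# call would pass the shape-feature columns of the ROI table):
#
#   xdata_demo = pd.DataFrame(np.random.rand(200, 16),
#                             columns=['shp_{:03d}'.format(i) for i in range(16)])
#   Y = visual_rois(xdata_demo, perplexity=30, plot=False)
#   # Y is an (n_rois, 2) array of t-SNE coordinates; pass a boolean index
#   # as idx_highlight to colour a subset of points in the scatter plot.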
def batch_find_rois(flist, params_detections, path_audio):
"""
    Detects regions of interest (ROIs) with find_rois_cwt in every audio
    file of a list and collects the detections per file.
    Parameters:
    ----------
    flist : pandas DataFrame
        List of audio filenames to process. Column name must be 'fname'
    params_detections: dict
        Dictionary with the basic parameters to feed find_rois:
        'flims', 'tlen', and 'th'.
    path_audio : str
        Path to the place where the dataset of audio files is stored
    Returns:
    -------
    info_detections: dict
        Dictionary with the detections per file and the parameters used
"""
# load parameters
flims = params_detections['flims']
tlen = params_detections['tlen']
th = params_detections['th']
detections = list()
for idx, fname in enumerate(flist['fname']):
print(idx+1, '/', len(flist), fname)
s, fs = sound.load(path_audio+fname)
rois = find_rois_cwt(s, fs, flims, tlen, th)
if not rois.empty:
# filter rois shorter than 25% of tlen
idx_rm = (rois.max_t - rois.min_t) < tlen*0.25
rois.drop(index=np.where(idx_rm)[0], inplace=True)
rois.reset_index(inplace=True, drop=True)
else:
pass
# save to list
detections.append({'fname':fname, 'rois':rois})
info_detections = {'detections': detections, 'parameters': params_detections}
return info_detections
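# Illustrative call to batch_find_rois (parameter values and paths below are
# hypothetical, not tuned values from this project):
#
#   params_detections = {'flims': (1000, 4000),  # frequency band in Hz
#                        'tlen': 0.5,            # expected call length in s
#                        'th': 1e-6}             # detection threshold
#   flist_demo = pd.DataFrame({'fname': ['site01_20190601_050000.wav']})
#   info_detections = batch_find_rois(flist_demo, params_detections,
#                                     path_audio='./audio/')
#   # info_detections['detections'][0]['rois'] holds the time-frequency
#   # bounds of every region of interest found in the first file.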
def joblib_features_to_csv(path_data, path_save):
"""
Exports features saved as joblib into a csv file readable by R and other
    programs. The joblib file should be computed with batch_feature_rois.
Parameters:
----------
path_data : str
string indicating the path to the joblib file
path_save : str
string with the file name to save the csv
Returns:
-------
Saves a file to disk. Does not return any variable
"""
info_features = joblib.load(path_data)
features = info_features['features']
# get xdata from object features
for idx, file in enumerate(features):
fname = file['fname']
aux_df = file['features']
aux_df['fname'] = fname
        if idx == 0:
xdata = aux_df
else:
xdata = pd.concat([xdata, aux_df], axis=0, sort=False)
xdata.reset_index(drop=True, inplace=True)
xdata.to_csv(path_save, index=False, sep=',')
def features_to_csv(features_data, path_save):
"""
Exports features object into a csv file
Parameters:
----------
    features_data : dict
features object computed with batch_feature_rois
path_save : str
string with the file name to save the csv
Returns:
-------
Saves a file to disk. Does not return any variable
"""
features = features_data['features']
# get xdata from object features
for idx, file in enumerate(features):
fname = file['fname']
aux_df = file['features']
aux_df['fname'] = fname
if idx == 0:
xdata = aux_df
else:
xdata = pd.concat([xdata, aux_df], axis=0, sort=False)
xdata.reset_index(drop=True, inplace=True)
xdata.to_csv(path_save, index=False, sep=',')
def batch_feature_rois(rois_list, params_features, path_audio):
"""
Computes features for a list of files
Parameters:
----------
    rois_list: list
        List of dictionaries as returned by batch_find_rois, each with
        the keys 'fname' and 'rois'
    params_features: dict
        Dictionary with the basic parameters to compute the features:
        'flims', 'opt_spec', and 'opt_shape_str'.
    path_audio : str
        Path to the place where the dataset of audio files is stored
    Returns:
    -------
    info_features: dict
        Dictionary with the features per file and the parameters used
"""
## TODO: when the time limits are too short, the function has problems
# load parameters
flims = params_features['flims']
opt_spec = params_features['opt_spec']
opt_shape = opt_shape_presets(params_features['opt_shape_str'])
# load detection data
features = []
for idx, file in enumerate(rois_list):
# unpack file values
fname = file['fname']
rois_tf = file['rois']
print(idx+1, '/', len(rois_list), fname)
if rois_tf.empty:
print('< No detection on file >')
features.append({'fname':fname, 'features': pd.DataFrame()})
else:
# load materials: sound, spectrogram
s, fs = sound.load(path_audio+fname)
im, dt, df, ext = sound.spectrogram(s, fs, nperseg=opt_spec['wl'],
overlap=opt_spec['ovlp'], fcrop=flims,
rescale=False, db_range=opt_spec['db_range'])
# format rois to bbox
ts = np.arange(ext[0], ext[1], dt)
f = np.arange(ext[2],ext[3]+df,df)
rois_bbox = format_rois(rois_tf, ts, f, fmt='bbox')
# roi to image blob
im_blobs = rois_to_imblobs(np.zeros(im.shape), rois_bbox)
# get features: shape, center frequency
im = normalize_2d(im, 0, 1)
bbox, params, shape = shape_features(im, im_blobs, resolution='custom',
opt_shape=opt_shape)
_, cent = centroid(im, im_blobs)
cent['frequency']= f[round(cent.y).astype(int)] # y values to frequency
# format rois to time-frequency
rois_out = format_rois(bbox, ts, f, fmt='tf')
# combine into a single df
aux_df = pd.concat([rois_out, shape, cent.frequency], axis=1)
# aux_df['fname'] = fname
features.append({'fname':fname, 'features': aux_df})
# Save data to binary object
info_features = {'features': features,
'parameters_df': params,
'opt_shape': opt_shape,
'opt_spectro': opt_spec}
return info_features
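# Illustrative parameter set for batch_feature_rois (values are hypothetical;
# 'opt_shape_str' should be one of the presets accepted by opt_shape_presets,
# e.g. 'med'):
#
#   params_features = {'flims': (1000, 4000),
#                      'opt_spec': {'wl': 512, 'ovlp': 0.5, 'db_range': 60},
#                      'opt_shape_str': 'med'}
#   info_features = batch_feature_rois(info_detections['detections'],
#                                      params_features, path_audio='./audio/')
#   features_to_csv(info_features, './features.csv')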
def batch_predict_rois(flist, tuned_clfs, params, path_audio_db='./'):
"""
Predict the labels of rois in a list of audio files.
Parameters
----------
flist: pandas DataFrame
list of audio filenames to be analysed. Column name must be 'fname'
tuned_clfs: dict
data structure with tuned classifiers by grid search or random search
params: dict
data structure with the same parameters used to train the classifiers.
Keys to be included: 'sample_rate_wav', 'flims', 'tlen', 'th',
'opt_spec', 'opt_shape_str'
path_audio_db: str, default current directory
path pointing to the directory where the audio files are located.
Note that all files in flist must be in the same directory
Returns
-------
predictions: dict
data structure with name of audio files as keys. Each element in the
dictionary has a DataFrame with predictions for every region interest
found. Predictions are given as probabilities for three different
classifiers, namely Random Forest ('rf'), Adaboost ('adb') and Support
Vector Machines ('svm').
"""
t_start = time.time() # compute processing time
# Load params and variables
clf_svm = tuned_clfs['svm'].best_estimator_
clf_rf = tuned_clfs['rf'].best_estimator_
clf_adb = tuned_clfs['adb'].best_estimator_
flims = params['flims']
tlen = params['tlen']
th = params['th']
opt_spec = params['opt_spec']
opt_shape = opt_shape_presets(params['opt_shape_str'])
sample_rate_std = params['sample_rate_wav']
# Batch: compute rois, features and predict through files
predictions = dict()
for idx, fname in enumerate(flist['fname']):
print(idx+1, '/', len(flist), fname)
# fname = flist['fname'][0]
s, fs = sound.load(path_audio_db+fname)
# Check sampling frequency on file
if fs==sample_rate_std:
pass
else:
print('Warning: sample rate mismatch, resampling audio file to standard',
sample_rate_std, 'Hz')
s = resample(s, fs, sample_rate_std, res_type='kaiser_fast')
fs = sample_rate_std
rois = find_rois_cwt(s, fs, flims, tlen, th)
if rois.empty:
#print('< No detection on file >')
predictions[fname] = -1
else:
# filter rois shorter than 25% of tlen
idx_rm = (rois.max_t - rois.min_t) < tlen*0.25
rois.drop(index=np.where(idx_rm)[0], inplace=True)
rois.reset_index(inplace=True, drop=True)
if rois.empty:
print('< No detection on file >')
predictions[fname] = -1
else:
# compute features
rois_features = compute_rois_features(s, fs, rois, opt_spec, opt_shape, flims)
# predict
X = rois_features.loc[:,rois_features.columns.str.startswith('shp')]
#X['frequency'] = preprocessing.scale(X['frequency']) # new! scale frequency
pred_rf = pd.DataFrame(data=clf_rf.predict_proba(X),
columns=[s + '_rf' for s in clf_rf.classes_.astype('str')])
pred_adb = pd.DataFrame(data=clf_adb.predict_proba(X),
columns=[s + '_adb' for s in clf_adb.classes_.astype('str')])
pred_svm = pd.DataFrame(data=clf_svm.predict_proba(X),
columns=[s + '_svm' for s in clf_svm.classes_.astype('str')])
# save to variable
pred_proba_file = pd.concat([rois, pred_rf, pred_adb, pred_svm], axis=1)
predictions[fname] = pred_proba_file
t_stop = time.time() # compute processing time
print('Batch process completed. Processing time: ', np.round(t_stop - t_start,2),'s')
return predictions
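# Sketch of a batch prediction run (file names, the fitted classifiers and
# the parameter values below are hypothetical):
#
#   tuned_clfs = joblib.load('tuned_clfs.joblib')  # dict with 'svm', 'rf', 'adb'
#   params = {'sample_rate_wav': 22050,
#             'flims': (1000, 4000), 'tlen': 0.5, 'th': 1e-6,
#             'opt_spec': {'wl': 512, 'ovlp': 0.5, 'db_range': 60},
#             'opt_shape_str': 'med'}
#   flist_demo = pd.DataFrame({'fname': listdir_pattern('./audio/', '.wav')})
#   predictions = batch_predict_rois(flist_demo, tuned_clfs, params,
#                                    path_audio_db='./audio/')
#   # predictions[fname] is -1 when no ROI is kept for a file, otherwise a
#   # DataFrame with one row per ROI and class probabilities from the
#   # rf / adb / svm classifiers.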
def listdir_pattern(path_dir, ends_with=None):
"""
    Wrapper function around os.listdir that filters filenames by a pattern
Parameters
----------
path_dir: str
path to directory
ends_with: str
pattern to search for at the end of the filename
    Returns
    -------
    new_list: list
        Filenames in path_dir that end with the given pattern
"""
flist = listdir(path_dir)
new_list = []
for names in flist:
        if ends_with is None or names.endswith(ends_with):
new_list.append(names)
return new_list
def read_audacity_annot (audacity_filename):
"""
Read audacity annotations file (or labeling file) and return a Pandas Dataframe
with the bounding box and the label of each region of interest (ROI)
Parameters
----------
audacity_filename : String
Path to the audacity file
Returns
-------
tab_out : Pandas Dataframe
        Table with the label and bounding box (min_t, min_f, max_t, max_f) of each ROI
References
----------
https://manual.audacityteam.org/man/label_tracks.html
"""
# read file with tab delimiter
tab_in = pd.read_csv(audacity_filename, delimiter='\t', header=None)
# arrange data
t_info = tab_in.loc[np.arange(0,len(tab_in),2),:]
t_info = t_info.rename(index=str, columns={0: 'min_t', 1: 'max_t', 2:'label'})
t_info = t_info.reset_index(drop=True)
f_info = tab_in.loc[np.arange(1,len(tab_in)+1,2),:]
f_info = f_info.rename(index=str, columns={0: 'slash', 1: 'min_f', 2:'max_f'})
f_info = f_info.reset_index(drop=True)
# return dataframe
tab_out = pd.concat([t_info['label'].astype('str'),
t_info['min_t'].astype('float32'),
f_info['min_f'].astype('float32'),
t_info['max_t'].astype('float32'),
f_info['max_f'].astype('float32')], axis=1)
return tab_out
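# Example of loading a hypothetical Audacity label track that was exported
# with spectral (frequency) selection enabled, so each region spans two
# lines: times + label, then a '\' line with min/max frequency:
#
#   rois_manual = read_audacity_annot('site01_labels.txt')
#   # -> DataFrame with columns: label, min_t, min_f, max_t, max_f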
def predictions_to_df(predictions, clf_lab_list):
"""
Parameters
----------
predictions : dict
Prediction element from the function batch_predict_rois
clf_lab_list : list
Names of classifiers to get
Returns
-------
res : pandas DataFrame
Highest score for each classifier
"""
res = pd.DataFrame()
for clf_lab in clf_lab_list:
pred_file = dict()
for fname, pred in predictions.items():
if type(pred) is int: # case where no ROIs were found
pred_file[fname] = 0
else:
# compute argmax on labels
positive_max = np.amax(pred[clf_lab].max())
n_high_prob = (pred.loc[:,clf_lab] > 0.5).sum()
pred_file[fname] = [positive_max, n_high_prob]
pred_file = pd.DataFrame(data=pred_file).transpose()
pred_file = pred_file.reset_index()
pred_file.columns = ['fname',clf_lab,'n_high_proba']
res = | pd.concat([res, pred_file[clf_lab]], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
from scipy.spatial import distance as spd
from gibbon.dxfs import MaskSpace
class MyMaskSpace(MaskSpace):
def __init__(self, polygons, vector):
super().__init__(polygons, vector)
@staticmethod
def get_distances(xs, ys):
positions = np.array([xs, ys])
distances = spd.cdist(positions.T, positions.T)
return | pd.DataFrame(distances) | pandas.DataFrame |
import pandas as pd
from unittest import TestCase, mock
from unittest.mock import MagicMock, PropertyMock
from gtfs_kit.feed import Feed
from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from usecase.process_service_date_for_gtfs_metadata import (
process_end_service_date_for_gtfs_metadata,
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
SATURDAY,
SUNDAY,
DATE,
SERVICE_ID,
EXCEPTION_TYPE,
END_DATE_MAP,
CALENDAR_DATE_KEY,
FEED_DATE_KEY,
)
class TestProcessEndServiceDateForGtfsMetadata(TestCase):
def test_process_end_service_date_with_none_gtfs_representation_should_raise_exception(
self,
):
self.assertRaises(TypeError, process_end_service_date_for_gtfs_metadata, None)
def test_process_end_service_date_with_invalid_gtfs_representation_should_raise_exception(
self,
):
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = str
self.assertRaises(
TypeError,
process_end_service_date_for_gtfs_metadata,
mock_gtfs_representation,
)
def test_process_end_service_date_with_dataset_with_missing_files(self):
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_end_service_date_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_metadata.end_service_date.assert_not_called()
def test_process_end_service_date_with_dataset_with_missing_fields(self):
mock_feed_info = PropertyMock(return_value=pd.DataFrame({}))
mock_calendar = PropertyMock(return_value= | pd.DataFrame({}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from collections import OrderedDict
from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.compat import product, range
import pandas as pd
from pandas import (
Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat,
date_range)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
class TestPivotTable(object):
def setup_method(self, method):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),
('A', 'd'), ('B', 'a'), ('B', 'b'),
('B', 'c'), ('B', 'd'), ('C', 'a'),
('C', 'b'), ('C', 'c'), ('C', 'd')],
names=['customer', 'product'])
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'],
dropna=True)
exp_index = pd.MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame(
{'values': [1, 2, 3, 4]},
index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ['a', 'b', 'c', 'd']
df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],
'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'C': range(0, 9)})
df['A'] = df['A'].astype(CDT(categories, ordered=False))
result = df.pivot_table(index='B', columns='A', values='C',
dropna=dropna)
expected_columns = Series(['a', 'b', 'c'], name='A')
expected_columns = expected_columns.astype(
CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name='B')
expected = DataFrame([[0, 3, 6],
[1, 4, 7],
[2, 5, 8]],
index=expected_index,
columns=expected_columns,)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(
columns=Categorical(categories)).astype('float')
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],
categories=['low', 'high'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3]},
index=pd.Index(
pd.Categorical.from_codes([0, 1],
categories=['low', 'high'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],
categories=['low', 'high', 'left'],
ordered=True),
'B': range(5)})
result = df.pivot_table(index='A', values='B', dropna=dropna)
expected = pd.DataFrame(
{'B': [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes([0, 1, 2],
categories=['low', 'high', 'left'],
ordered=True),
name='A'))
tm.assert_frame_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'int64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
assert f.dtypes['v'] == 'float64'
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns,values',
[('bool1', ['float1', 'float2']),
('bool1', ['float1', 'float2', 'bool1']),
('bool2', ['float1', 'float2', 'bool1'])])
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame({'float1': v, 'float2': v + 2.0,
'bool1': v <= 2, 'bool2': v <= 3})
df_res = df.reset_index().pivot_table(
index='index', columns=columns, values=values)
result = dict(df_res.dtypes)
expected = {col: np.dtype('O') if col[0].startswith('b')
else np.dtype('float64') for col in df_res}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',
'2011-01-01', '2011-01-02'])
df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},
index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])
exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
'dt': pd.date_range('2011-01-01', freq='D',
periods=5)},
index=idx)
res = df.pivot_table(index=df.index.month,
columns=pd.Grouper(key='dt', freq='M'))
exp_columns = pd.MultiIndex.from_tuples([('A',
pd.Timestamp('2011-01-31'))])
exp_columns.names = [None, 'dt']
exp = pd.DataFrame([3.25, 2.0],
index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(index=pd.Grouper(freq='A'),
columns=pd.Grouper(key='dt', freq='M'))
exp = pd.DataFrame([3],
index=pd.DatetimeIndex(['2011-12-31']),
columns=exp_columns)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
if method:
result = df.pivot('a', 'b', 'c')
else:
result = pd.pivot(df, 'a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
if method:
pv = df.pivot('a', 'b', 'c')
else:
pv = pd.pivot(df, 'a', 'b', 'c')
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row['a'], row['b']] == row['c']
if method:
result = df.pivot('b', 'a', 'c')
else:
result = pd.pivot(df, 'b', 'a', 'c')
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
if method:
pv = df.pivot(index='dt1', columns='dt2')
else:
pv = pd.pivot(df, index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
if method:
pv = df.pivot(index='dt1', columns='dt2', values='data1')
else:
pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_periods(self, method):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
if method:
pv = df.pivot(index='p1', columns='p2')
else:
pv = pd.pivot(df, index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
if method:
pv = df.pivot(index='p1', columns='p2', values='data1')
else:
pv = pd.pivot(df, index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize('values', [
['baz', 'zoo'], np.array(['baz', 'zoo']),
pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='foo', columns='bar', values=values)
else:
result = pd.pivot(df, index='foo', columns='bar', values=values)
data = [[1, 2, 3, 'x', 'y', 'z'],
[4, 5, 6, 'q', 'w', 't']]
index = Index(data=['one', 'two'], name='foo')
columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, 'bar'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('values', [
['bar', 'baz'], np.array(['bar', 'baz']),
pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
])
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
if method:
result = df.pivot(index='zoo', columns='foo', values=values)
else:
result = pd.pivot(df, index='zoo', columns='foo', values=values)
data = [[np.nan, 'A', np.nan, 4],
[np.nan, 'C', np.nan, 6],
[np.nan, 'B', np.nan, 5],
['A', np.nan, 1, np.nan],
['B', np.nan, 2, np.nan],
['C', np.nan, 3, np.nan]]
index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, 'foo'])
expected = DataFrame(data=data, index=index,
columns=columns, dtype='object')
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
'with KeyError GH#19966')
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [['one', 'A', 1, 'x'],
['one', 'B', 2, 'y'],
['one', 'C', 3, 'z'],
['two', 'A', 4, 'q'],
['two', 'B', 5, 'w'],
['two', 'C', 6, 't']]
columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
df = DataFrame(data=data, index=index, columns=columns, dtype='object')
if method:
result = df.pivot(index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
else:
result = pd.pivot(df,
index=('bar', 'first'),
columns=('bar', 'second'),
values=('baz', 'first'))
data = {'A': Series([1, 4], index=['one', 'two']),
'B': Series([2, 5], index=['one', 'two']),
'C': Series([3, 6], index=['one', 'two'])}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('method', [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6],
'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
with pytest.raises(KeyError):
# tuple is seen as a single column name
if method:
df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
else:
pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
assert index_margins.name == (margins_col, '')
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [12, 21, 3, 9, 45],
'shiny': [33, 0, 36, 51, 120]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = df.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=np.sum, fill_value=0)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '
'floats)')
def test_margins_dtype_len(self):
mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]
mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))
expected = DataFrame({'dull': [1, 1, 2, 1, 5],
'shiny': [2, 0, 2, 2, 6]},
index=mi).rename_axis('C', axis=1)
expected['All'] = expected['dull'] + expected['shiny']
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C', margins=True,
aggfunc=len, fill_value=0)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([('D', ['std']), ('E', ['sum'])])
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(
index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_pivot_table_with_margins_set_margin_name(self):
# see gh-3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with pytest.raises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with pytest.raises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME> <NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='<NAME>'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
pytest.raises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
pytest.raises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': '<NAME>'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range('1/1/2000', '12/31/2004', freq='D')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(DataFrame(ts), index=ts.index.year,
columns=ts.index.dayofyear)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range('1/1/2000', '12/31/2004', freq='M')
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,
columns=ts.index.month)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = 'Weekly'
costs = pd.DataFrame(
{'item': ['bacon', 'cheese', 'bacon', 'cheese'],
'cost': [2.5, 4.5, 3.2, 3.3],
'day': ['M', 'M', 'T', 'T']}
)
table = costs.pivot_table(
index="item", columns="day", margins=True,
margins_name=margins_name, aggfunc=[np.mean, max]
)
ix = pd.Index(
['bacon', 'cheese', margins_name], dtype='object', name='item'
)
tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),
('mean', 'cost', margins_name), ('max', 'cost', 'M'),
('max', 'cost', 'T'), ('max', 'cost', margins_name)]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
    @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is cast back to '
                              'ints)')
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
    @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is cast back to '
                              'ints)')
def test_categorical_margins_category(self, observed):
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
df.y = df.y.astype('category')
df.z = df.z.astype('category')
table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame({"C1": ["A", "B", "C", "C"],
"C2": ["a", "a", "b", "b"],
"V": [1, 2, 3, 4]})
df["C1"] = df["C1"].astype("category")
result = df.pivot_table("V", index="C1", columns="C2",
dropna=observed, aggfunc="count")
expected_index = pd.CategoricalIndex(['A', 'B', 'C'],
categories=['A', 'B', 'C'],
ordered=False,
name='C1')
expected_columns = pd.Index(['a', 'b'], name='C2')
expected_data = np.array([[1., np.nan],
[1., np.nan],
[np.nan, 2.]])
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame({'Sales': [100, 120, 220],
'Month': ['January', 'January', 'January'],
'Year': [2013, 2014, 2013]})
months = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
df['Month'] = df['Month'].astype('category').cat.set_categories(months)
result = df.pivot_table(values='Sales',
index='Month',
columns='Year',
dropna=observed,
aggfunc='sum')
expected_columns = pd.Int64Index([2013, 2014], name='Year')
expected_index = pd.CategoricalIndex(['January'],
categories=months,
ordered=False,
name='Month')
expected = pd.DataFrame([[320, 120]],
index=expected_index,
columns=expected_columns)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
# when values is not list like and columns is None
# and aggfunc is not instance of list
df = DataFrame({'col1': [3, 4, 5],
'col2': ['C', 'D', 'E'],
'col3': [1, 3, 9]})
result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9],
['C', 'D', 'E']],
names=['col3', 'col2'])
expected = DataFrame([3, 4, 5],
index=m, columns=['col1'])
tm.assert_frame_equal(result, expected)
result = df.pivot_table(
'col1', index='col3', columns='col2', aggfunc=np.sum
)
expected = DataFrame([[3, np.NaN, np.NaN],
[np.NaN, 4, np.NaN],
[np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name='col3'),
columns=Index(['C', 'D', 'E'], name='col2'))
tm.assert_frame_equal(result, expected)
result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])
m = MultiIndex.from_arrays([['sum'],
['col1']])
expected = DataFrame([3, 4, 5],
index=Index([1, 3, 9], name='col3'),
columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = u'\u0394\u03bf\u03ba\u03b9\u03bc\u03ae'
frame = pd.DataFrame({'foo': [1, 2, 3]})
table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,
margins_name=greek)
index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',
'bar', 'bar', 'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one',
'one', 'two', 'two', 'two', 'one'],
'C': range(11)})
result = pivot_table(data, index='A', columns='B', aggfunc='sum')
mi = MultiIndex(levels=[['C'], ['one', 'two']],
codes=[[0, 0], [0, 1]], names=[None, 'B'])
expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},
('C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index='A', columns='B',
aggfunc=['sum', 'mean'])
mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, 'B'])
expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},
('mean', 'C', 'two'): {'bar': 7.0,
'foo': 6.666666666666667},
('sum', 'C', 'one'): {'bar': 15, 'foo': 13},
('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},
columns=mi).rename_axis('A')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('f, f_numpy',
[('sum', np.sum),
('mean', np.mean),
('std', np.std),
(['sum', 'mean'], [np.sum, np.mean]),
(['sum', 'std'], [np.sum, np.std]),
(['std', 'mean'], [np.std, np.mean])])
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index='A', columns='B', aggfunc=f)
expected = pivot_table(self.data, index='A', columns='B',
aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame({'ind1': np.arange(2 ** 16),
'ind2': np.arange(2 ** 16),
'count': 0})
with pytest.raises(ValueError, match='int32 overflow'):
df.pivot_table(index='ind1', columns='ind2',
values='count', aggfunc='count')
class TestCrosstab(object):
def setup_method(self, method):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
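        # append the frame to itself so every row is duplicated, exercising
        # aggregation over repeated (A, B, C) keys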
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
assert result.index.name == 'row_0'
assert result.columns.name == 'col_0'
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])
b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])
c = np.array([3, 4, 3])
expected = pd.DataFrame([[1, 0], [1, 1]],
index=Index([0, 1], name='row_0'),
columns=Index([3, 4], name='col_0'))
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
assert result.index.names == ('a',)
assert result.columns.names == ['b', 'c']
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
# to keep index.name
exp_margin = Series([len(df)], index=Index(['All'], name='a'))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
| tm.assert_series_equal(all_rows, exp_rows) | pandas.util.testing.assert_series_equal |
#!/usr/bin/env python3
from functools import lru_cache
import click
import fsspec
import geopandas
import pandas as pd
import requests
from shapely.geometry import box
crs = "+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs +type=crs"
m2_to_acre = 4046.86
projects = ["ACR273", "ACR274", "ACR255", "CAR1174", "CAR1046", "ACR260"]
years = list(range(1984, 2022))
simplification = {
"ACR273": 250,
"ACR274": 250,
"ACR255": 300,
"ACR260": 100,
"CAR1174": 10,
"CAR1046": 10,
}
simplification_fire = {
"ACR273": 3000,
"ACR274": 500,
"ACR255": 500,
"ACR260": 500,
"CAR1174": 250,
"CAR1046": 250,
}
fire_contributions = {
"ACR273": 0.027,
"ACR274": 0.027,
"ACR255": 0.02,
"ACR260": 0.04,
"CAR1174": 0.04,
"CAR1046": 0.04,
}
termination_dates = {"CAR1046": "9/12/2017"}
@lru_cache
def load_project_db():
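    # lru_cache ensures the remote project-database JSON is fetched only once per run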
retro_uri = requests.get(
"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/database/forest-offsets-database-v1.0.json"
)
retro_json = retro_uri.json()
return retro_json
def buffer_and_simplify(gdf, distance=None):
gdf_out = gdf.copy(deep=True)
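    # buffer out and back in by 100 m (a morphological closing) to smooth slivers
    # and pinholes, then simplify to the given tolerance; units are metres because
    # the equal-area CRS defined above uses +units=m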
gdf_out["geometry"] = [
g.buffer(100).buffer(-100).simplify(distance, preserve_topology=False) for g in gdf.geometry
]
return gdf_out
def load_nifc_fires():
"""load nifc data for 2020/2021 fire season
NB this is a bit of an undocumented NIFC feature -- the data supposedly only cover 2021
but there are definitely 2020 fires included at the endpoint.
This might not be true in the future.
https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-wildland-fire-perimeters-full-history/about
"""
nifc_uri = "https://storage.googleapis.com/carbonplan-data/raw/nifc/WFIGS_-_Wildland_Fire_Perimeters_Full_History.geojson"
fires = geopandas.read_file(nifc_uri)
nifc_colnames = {"poly_IncidentName": "name", "poly_Acres_AutoCalc": "acres"}
fires = fires.rename(columns=nifc_colnames)
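    # keep only fires discovered in 2020 or 2021 (string-prefix match on the ISO discovery timestamp)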
fires = fires[fires["irwin_FireDiscoveryDateTime"].str[:4].isin(["2020", "2021"])]
fires["ignite_at"] = (
fires["irwin_FireDiscoveryDateTime"]
.apply(pd.Timestamp)
.apply(lambda x: pd.Timestamp(x.date()))
)
return fires.to_crs(crs)[["name", "acres", "ignite_at", "geometry"]]
def load_mtbs_fires():
"""
load mtbs data
Originally from: https://www.mtbs.gov/direct-download
"""
fire_uri = "https://storage.googleapis.com/carbonplan-data/raw/mtbs/mtbs_perimeter_data/mtbs_perims_DD.json"
fires = geopandas.read_file(fire_uri)
fires = fires[fires["Incid_Type"] == "Wildfire"]
mtbs_colnames = {"Incid_Name": "name", "BurnBndAc": "acres"}
fires = fires.rename(columns=mtbs_colnames)
fires["ignite_at"] = fires["Ig_Date"].apply(pd.Timestamp)
return fires.to_crs(crs)[["name", "acres", "ignite_at", "geometry"]]
def load_fires():
nifc = load_nifc_fires()
mtbs = load_mtbs_fires()
return pd.concat([nifc, mtbs])
def load_project_geometry(opr_id):
path = f"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/database/projects/{opr_id}/shape.json"
gdf = geopandas.read_file(path).to_crs(crs)
return gdf
def load_simple_project(opr_id, distance=None):
gdf = load_project_geometry(opr_id)
gdf = buffer_and_simplify(gdf, distance=distance)
attributes = get_project_attributes(opr_id)
for k, v in attributes.items():
gdf[k] = v
return gdf
def get_project_attributes(opr_id):
retro_json = load_project_db()
project = [project for project in retro_json if project["opr_id"] == opr_id][0]
return {
"start_date": pd.Timestamp(project["rp_1"]["start_date"]),
"acreage": project["acreage"],
"termination_date": pd.Timestamp(termination_dates.get(opr_id, pd.NaT)),
"fire_buffer_contrib": fire_contributions.get(opr_id, -999),
}
def make_project_fires(fires, project_shape, buffer=None, scale=1, distance=None):
center = project_shape.centroid[0]
bounds = project_shape.envelope[0].bounds
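    # default viewing window: a 2:1 (width:height) box centred on the project,
    # sized from half the project's north-south extent and multiplied by `scale`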
if buffer is None:
deltay = (bounds[3] - bounds[1]) / 2
buffer = [deltay * 2 * scale, deltay * scale]
envelope = box(
center.x - buffer[0],
center.y - buffer[1],
center.x + buffer[0],
center.y + buffer[1],
)
    fires_proj = fires[fires.intersects(envelope)].copy()  # copy so the assignment below avoids SettingWithCopyWarning
    fires_proj["year"] = pd.to_datetime(fires_proj["ignite_at"]).dt.year
fire_years = fires_proj[["year", "geometry"]].dissolve(by="year").reset_index()
fire_years = buffer_and_simplify(fire_years, distance=distance)
fire_years = fire_years.set_index("year").reindex(years).reset_index()
return fire_years
def get_project_fire_stats(fires, opr_id, start_dt, termination_dt):
"""calculate number and area of fires that intersect project area"""
geom = load_project_geometry(opr_id) # use non-buffered shape for area calcs
# includes fires during the window between termination event and actual termination. edge case?
eligible_fires = fires[
(fires["ignite_at"] > start_dt) & (fires["ignite_at"] < termination_dt)
].copy()
project_fires = geopandas.sjoin(eligible_fires, geom.to_crs(fires.crs))
intersect_fires = geopandas.clip(project_fires, geom.to_crs(fires.crs))
burned_acres = intersect_fires.unary_union.area / m2_to_acre # each acre can only burn once
return (len(intersect_fires), burned_acres)
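# Usage sketch (dates here are hypothetical): get_project_fire_stats(load_fires(), "ACR260",
# pd.Timestamp("2012-01-01"), pd.Timestamp.today()) returns a (fire_count, burned_acres) tuple
# for fires ignited inside that window.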
@click.command()
@click.option("--upload-to", type=str, default=None, help="Where to put the workflow contents")
@click.option("--version", type=int, default=0, help="Version to append")
def main(upload_to, version):
print("loading fire data")
fires = load_fires()
for project in projects:
print(f"processing project {project}")
distance = simplification.get(project, 250)
distance_fire = simplification_fire.get(project, 250)
print("-->loading and simplifying project shape")
project_shape = load_simple_project(project, distance=distance)
print("-->calculating project fire statistics")
if pd.notnull(project_shape["termination_date"].iloc[0]):
termination_dt = project_shape["termination_date"].iloc[0]
else:
termination_dt = | pd.Timestamp.today() | pandas.Timestamp.today |
import sys
sys.path.append("./")
import backtrader as bt
from backtrader import plot
import matplotlib.pyplot as plt
import os, sqlite3, config
import pandas as pd
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML
from utils import timestamp2str, get_now, dir_exists
conn = sqlite3.connect(config.DB_FILE)
class PerformanceReport:
""" Report with performce stats for given backtest run
"""
def __init__(self, stratbt, conn, infilename, user, memo, outputdir, run_id):
        self.stratbt = stratbt  # works for only one strategy
self.infilename = infilename
self.outputdir = outputdir
self.user = user
self.memo = memo
self.check_and_assign_defaults()
self.conn = conn
self.cursor = self.conn.cursor()
self.run_id = run_id
def check_and_assign_defaults(self):
""" Check initialization parameters or assign defaults
"""
if not self.infilename:
self.infilename = 'Not given'
# if not dir_exists(self.outputdir):
# msg = "*** ERROR: outputdir {} does not exist."
# print(msg.format(self.outputdir))
# sys.exit(0)
if not self.user:
self.user = 'GKCap'
if not self.memo:
self.memo = 'No comments'
def get_performance_stats(self):
""" Return dict with performace stats for given strategy withing backtest
"""
st = self.stratbt
dt = st.data._dataname['open'].index
trade_analysis = st.analyzers.myTradeAnalysis.get_analysis()
rpl = trade_analysis.pnl.net.total
total_return = rpl / self.get_startcash()
total_number_trades = trade_analysis.total.total
trades_closed = trade_analysis.total.closed
bt_period = dt[-1] - dt[0]
bt_period_days = bt_period.days
drawdown = st.analyzers.myDrawDown.get_analysis()
sharpe_ratio = st.analyzers.mySharpe.get_analysis()['sharperatio']
sqn_score = st.analyzers.mySqn.get_analysis()['sqn']
kpi = {# PnL
'start_cash': self.get_startcash(),
'rpl': rpl,
'result_won_trades': trade_analysis.won.pnl.total,
'result_lost_trades': trade_analysis.lost.pnl.total,
'profit_factor': (-1 * trade_analysis.won.pnl.total / trade_analysis.lost.pnl.total),
'rpl_per_trade': rpl / trades_closed,
'total_return': 100 * total_return,
'annual_return': (100 * (1 + total_return)**(365.25 / bt_period_days) - 100),
'max_money_drawdown': drawdown['max']['moneydown'],
'max_pct_drawdown': drawdown['max']['drawdown'],
# trades
'total_number_trades': total_number_trades,
'trades_closed': trades_closed,
'pct_winning': 100 * trade_analysis.won.total / trades_closed,
'pct_losing': 100 * trade_analysis.lost.total / trades_closed,
'avg_money_winning': trade_analysis.won.pnl.average,
'avg_money_losing': trade_analysis.lost.pnl.average,
'best_winning_trade': trade_analysis.won.pnl.max,
'worst_losing_trade': trade_analysis.lost.pnl.max,
# performance
'sharpe_ratio': sharpe_ratio,
'sqn_score': sqn_score,
'sqn_human': self._sqn2rating(sqn_score)
}
return kpi
def get_equity_curve(self):
""" Return series containing equity curve
"""
st = self.stratbt
dt = st.data._dataname['open'].index
value = st.observers.broker.lines[1].array[:len(dt)]
curve = pd.Series(data=value, index=dt)
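        # normalise so the curve starts at 100, making runs with different starting capital comparable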
return 100 * curve / curve.iloc[0]
def _sqn2rating(self, sqn_score):
""" Converts sqn_score score to human readable rating
See: http://www.vantharp.com/tharp-concepts/sqn.asp
"""
if sqn_score < 1.6:
return "Poor"
elif sqn_score < 1.9:
return "Below average"
elif sqn_score < 2.4:
return "Average"
elif sqn_score < 2.9:
return "Good"
elif sqn_score < 5.0:
return "Excellent"
elif sqn_score < 6.9:
return "Superb"
else:
return "Holy Grail"
def __str__(self):
msg = ("*** PnL: ***\n"
"Start capital : {start_cash:4.2f}\n"
"Total net profit : {rpl:4.2f}\n"
"Result winning trades : {result_won_trades:4.2f}\n"
"Result lost trades : {result_lost_trades:4.2f}\n"
"Profit factor : {profit_factor:4.2f}\n"
"Total return : {total_return:4.2f}%\n"
"Annual return : {annual_return:4.2f}%\n"
"Max. money drawdown : {max_money_drawdown:4.2f}\n"
"Max. percent drawdown : {max_pct_drawdown:4.2f}%\n\n"
"*** Trades ***\n"
"Number of trades : {total_number_trades:d}\n"
" %winning : {pct_winning:4.2f}%\n"
" %losing : {pct_losing:4.2f}%\n"
" avg money winning : {avg_money_winning:4.2f}\n"
" avg money losing : {avg_money_losing:4.2f}\n"
" best winning trade: {best_winning_trade:4.2f}\n"
" worst losing trade: {worst_losing_trade:4.2f}\n\n"
"*** Performance ***\n"
"Sharpe ratio : {sharpe_ratio:4.2f}\n"
"SQN score : {sqn_score:4.2f}\n"
"SQN human : {sqn_human:s}"
)
kpis = self.get_performance_stats()
        # see: https://stackoverflow.com/questions/24170519/
        # python-typeerror-non-empty-format-string-passed-to-object-format
kpis = {k: -999 if v is None else v for k, v in kpis.items()}
return msg.format(**kpis)
def plot_equity_curve(self, fname='equity_curve.png'):
""" Plots equity curve to png file
"""
curve = self.get_equity_curve()
buynhold = self.get_buynhold_curve()
xrnge = [curve.index[0], curve.index[-1]]
dotted = pd.Series(data=[100, 100], index=xrnge)
fig, ax = plt.subplots(1, 1)
ax.set_ylabel('Net Asset Value (start=100)')
ax.set_title('Equity curve')
_ = curve.plot(kind='line', ax=ax)
_ = buynhold.plot(kind='line', ax=ax, color='grey')
_ = dotted.plot(kind='line', ax=ax, color='grey', linestyle=':')
return fig
def _get_periodicity(self):
""" Maps length backtesting interval to appropriate periodiciy for return plot
"""
curve = self.get_equity_curve()
startdate = curve.index[0]
enddate = curve.index[-1]
time_interval = enddate - startdate
time_interval_days = time_interval.days
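        # pick a resampling frequency coarse enough to give a readable number of bars
        # for the total span of the backtest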
if time_interval_days > 5 * 365.25:
periodicity = ('Yearly', 'Y')
elif time_interval_days > 365.25:
periodicity = ('Monthly', 'M')
elif time_interval_days > 50:
periodicity = ('Weekly', '168H')
elif time_interval_days > 5:
periodicity = ('Daily', '24H')
elif time_interval_days > 0.5:
periodicity = ('Hourly', 'H')
        elif time_interval_days > 0.05:
            periodicity = ('Per 15 Min', '15T')  # 'T' is the pandas minute alias; '15M' would mean 15 months
        else:
            periodicity = ('Per minute', '1T')
return periodicity
def plot_return_curve(self, fname='return_curve.png'):
""" Plots return curve to png file
"""
curve = self.get_equity_curve()
period = self._get_periodicity()
values = curve.resample(period[1]).ohlc()['close']
# returns = 100 * values.diff().shift(-1) / values
returns = 100 * values.diff() / values
returns.index = returns.index.date
is_positive = returns > 0
fig, ax = plt.subplots(1, 1)
ax.set_title("{} returns".format(period[0]))
ax.set_xlabel("date")
ax.set_ylabel("return (%)")
_ = returns.plot.bar(color=is_positive.map({True: 'green', False: 'red'}), ax=ax)
return fig
def generate_html(self):
""" Returns parsed HTML text string for report
"""
basedir = os.path.abspath(os.path.dirname(__file__))
images = os.path.join(basedir, 'report_templates')
eq_curve = os.path.join(images, 'equity_curve.png')
rt_curve = os.path.join(images, 'return_curve.png')
fig_equity = self.plot_equity_curve()
fig_equity.savefig(eq_curve)
fig_return = self.plot_return_curve()
fig_return.savefig(rt_curve)
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("report_templates/template.html")
header = self.get_header_data()
kpis = self.get_performance_stats()
graphics = {'url_equity_curve': 'file://' + eq_curve,
'url_return_curve': 'file://' + rt_curve
}
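        # merge header fields, performance KPIs and chart URIs into a single context dict
        # for the Jinja2 template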
all_numbers = {**header, **kpis, **graphics}
html_out = template.render(all_numbers)
return html_out
def generate_return_data(self):
curve = self.get_equity_curve()
period = self._get_periodicity()
values = curve.resample(period[1]).ohlc()['close']
# returns = 100 * values.diff().shift(-1) / values
returns = 100 * values.diff() / values
returns.index = returns.index.date
returns = pd.Series(data=returns, index=returns.index)
        d = {'datetime': returns.index, 'value': returns.values}
        return_df = pd.DataFrame(d, columns=['datetime', 'value'])
return_df.reset_index(drop=True, inplace=True)
return_df.set_index('datetime', inplace=True)
print(f"return_df columns: {return_df.columns}")
return_df.index = pd.to_datetime(return_df.index)
cursor = self.conn.cursor()
# log in db
cursor.execute("""
DROP TABLE return_data
""")
self.conn.commit()
cursor.execute("""
CREATE TABLE return_data (
datetime BLOB,
value REAL
)
""")
self.conn.commit()
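        # note: to_sql(if_exists='replace') below recreates the table anyway, so the
        # explicit DROP/CREATE above is defensive rather than strictly required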
return_df.to_sql(name='return_data', con=conn, if_exists='replace', index=True)
self.conn.commit()
def generate_curve_data(self):
# Make function and loop through data at some point
curve = self.get_equity_curve()
buynhold = self.get_buynhold_curve()
curve_df = | pd.DataFrame(curve) | pandas.DataFrame |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_uniques = o
exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"drop_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Series, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Series, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
        s = Series(vals)
import numpy as np
import os, glob, re
import pandas as pd
import struct
from scipy.ndimage import filters # uniform filter and others
from tkinter import filedialog
from operator import itemgetter
from itertools import groupby
from scipy.signal import argrelmin
from PIL import Image
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
class AESquantmap():
''' Data structure for auger quant maps (using multiplex mode) '''
def __init__(self, directory):
# os.chdir(directory) # use loader instead
self.directory=directory # single quantmap per directory
self.pixarray = pd.DataFrame() # linkage between pixels in quant map and spe data files
self.specregions = None # elem, evrange, etc. info from multiplex
self.uniquename = None # unique prefix string for all files
# pixarray csv contains instructions to assemble spectral image from assorted files
self.loadQMpixarray()
self.aesquantparams = pd.DataFrame()
# all info on Auger peaks
self.loadAESquantparams()
self.QMfilename = None # name of existing npy file
self.specimage = np.empty([0,0,0]) # 3d numpy array w/ entire multiplex
self.dim = None # (X, Y, Z) of spectral image
self.shiftmaps = [] # peakshift 2D array of dimension X * Y (one per element)
self.amplmaps = [] # s7d7 amplitudes 2D array of dimension X * Y (one per element)
self.integmaps = []
self.peakintmaps = [] # max intensity values at direct peak
self.elemmaps = [] # for storage of normalized element quant maps
self.specfilter = None # short term storage for filtered spectrum
self.energy = None # multiplex energy values (sorted)
self.evbreaks = None # multiplex energy breaks
self.spectralregs=pd.DataFrame()
self.elements = [] # returned by get_elemdata
self.elemdata = []
self.loadspecimage() # attempt load/create of spectral image
# calls makespecimage if it doesn't exist
# also calls get_spectral_regs and get_elemdata
self.extracted = None # temp spectrum extracted from
self.extracts7d7 = None
self.exttype = None # pixel or lasso
self.quant =[] # list for quant results from extracted spectrum
self.quant2=[] # quant results from direct integ (not deriv)
self.derivparams=[] # pospeak and negpeak vals for deriv spectral plot
self.integparams=[] # peak intensity and energy for direct spectral plot
# Go ahead and attempt load of existing amplmaps, integmaps
self.load_maps()
def loadAESquantparams(self):
''' Loads standard values of Auger quant parameters
TODO what about dealing with local shifts '''
# Checkbutton option for local (or standard) AESquantparams in file loader?
self.aesquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
print('AESquantparams loaded')
def loadQMpixarray(self):
''' Load of standard files from main Auger data directory
button linked or auto-loaded? '''
os.chdir(self.directory)
pixfiles=glob.glob('*QMpixarray.csv')
if len(pixfiles)==1:
self.pixarray=pd.read_csv(pixfiles[0])
if 'Filename' not in self.pixarray:
print('spe data file names not yet linked with quantmap pixel array.')
            # file naming convention is '<uniquestring>_QMpixarray.csv'
self.uniquename = pixfiles[0].split('_QMpixarray.csv')[0]
print('QMpix array loaded')
else:
print("Couldn't locate single pixarray definition file")
if 'Filename' not in self.pixarray.columns:
print('QM pixels not yet linked to multiplex files')
self.link_files()
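    # Illustrative note (not taken from an actual csv): based on how the columns are
    # used below, each QMpixarray row is expected to describe one map pixel with at least
    #   Xindex, Yindex  - pixel position in the quant map
    #   Subnumber       - spatial-area number (0-19) within the linked spe file
    #   Filename        - multiplex spe file for that pixel (filled in by link_files)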
def loadspecimage(self):
''' Load existing numpy specimage array (if already created) '''
os.chdir(self.directory)
npyfiles=glob.glob('*specimage.npy')
if len(npyfiles)==1:
self.specimage=np.load(npyfiles[0])
self.dim=self.specimage.shape
self.QMfilename=npyfiles[0]
self.get_spectral_regs() # get energy structure & eVbreaks
self.get_elemdata() # extracts elements and elemdata
print('Spectral image ', self.QMfilename,' loaded')
else:
print("Couldn't locate single spectral image numpy 3D array in current directory")
# Attempt to create this from pix array and all spe files
self.make_specimage()
def save_specimage(self):
''' Called by menu to GUIroi '''
if self.specimage.shape[0]==0:
return
fname=self.directory+'/'+self.uniquename+'_specimage.npy'
np.save(fname,self.specimage)
print('Spectral image array saved.')
def save_pixarray(self):
''' Called after link_files finds correct associated spe files '''
if self.pixarray.empty:
return
fname=self.uniquename + '_QMpixarray.csv'
self.pixarray.to_csv(fname, index=False)
print('Altered pixarray csv file saved.')
def make_specimage(self):
''' Create 3D numpy array from underlying multiplex files '''
print('Making spectral image')
# Check to ensure file linkage made in QMpixarray
if self.pixarray.empty:
print('QM pix array file is not loaded')
return
if 'Filename' not in self.pixarray.columns:
print('QM pixels not yet linked to multiplex files.. link_files!!')
self.link_files()
# Get energy range for multiplex from first data file (same for all pixels)
# get energy values, evbreaks, and multiplex details from first spe file
self.get_spectral_regs()
xmax=self.pixarray.Xindex.max()
ymax=self.pixarray.Yindex.max()
# make 3D blank numpy array of dimension xmax+1 * ymax+1 * len(energy)-duplicates
# using set since inadvertent duplicate ev vals are removed (using keep_best_vals)
self.specimage=np.empty([xmax+1,ymax+1,len(set(self.energy))])
# Now extract counts from all areas in each multiplex (20 max per spe)
spefilelist=np.ndarray.tolist(self.pixarray.Filename.unique())
for i, file in enumerate(spefilelist):
thisfile=self.pixarray[self.pixarray['Filename']==file]
numareas=len(thisfile)
# dataframe containing all spatial areas from this spe
thismult=self.get_multiplex_data(file, numareas)
thismult=self.keep_best_vals(thismult)
if thismult.empty:
print('File missing ... no data for ', file)
continue
for index, row in thisfile.iterrows():
xind=thisfile.loc[index]['Xindex']
yind=thisfile.loc[index]['Yindex']
thiscolnum=thisfile.loc[index]['Subnumber']+1
if len(thismult)!=self.specimage.shape[2]:
print('spe spectrum has different length than specimage array!')
continue
else:
self.specimage[xind, yind,:]=thismult['Counts'+str(thiscolnum)]
# now remove duplicate eV vals
self.energy=list(set(self.energy))
self.energy.sort() # data vals are auto-sorted in get_multiplex_data
print('Spectral image created.')
self.dim=self.specimage.shape # Get spectral image dimensions
self.get_elemdata() # now grab elemdata
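    # Usage sketch (assumes an instance named qm and a separate matplotlib import,
    # neither of which is part of this module): one pixel's multiplex spectrum is just
    # a Z-slice of the assembled cube, e.g.
    #   plt.plot(qm.energy, qm.specimage[10, 12, :])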
def keep_best_vals(self, thismult):
'''For duplicate energy values in multiplex scans, keep the one with
largest # of sweeps '''
# make temp index ranges for spectralregs (can't use energy values)
start=0
self.spectralregs['Start']=0 # for index #s
self.spectralregs['End']=0
for index,row in self.spectralregs.iterrows():
lower=self.spectralregs.loc[index]['Lower']
upper=self.spectralregs.loc[index]['Upper']
thisrange=int(upper-lower)
self.spectralregs=self.spectralregs.set_value(index,'Start',start)
self.spectralregs=self.spectralregs.set_value(index,'End',start+thisrange)
start=start+thisrange+1 # adjust for next loop
dupl=thismult.duplicated(['Energy'], keep=False) # only duplicate vals and keep both
dupl=thismult.loc[dupl]
energyvals=dupl.Energy.unique()
energyvals=np.ndarray.tolist(energyvals)
removelist=[]
for i, val in enumerate(energyvals):
thismatch=dupl[dupl['Energy']==val]
if len(thismatch)!=2:
print('Unknown error in energy duplicate elimination')
continue
else: # pick counts value with highest number of sweeps (best value)
index1=thismatch.index[0]
index2=thismatch.index[1]
specmatch1=self.spectralregs[(self.spectralregs['Start']<=index1)&(self.spectralregs['End']>=index1)]
specmatch2=self.spectralregs[(self.spectralregs['Start']<=index2)&(self.spectralregs['End']>=index2)]
try:
if specmatch1.iloc[0]['Sweeps']>=specmatch2.iloc[0]['Sweeps']:
# first is best value... remove it from dupl (which will be used as knockout df)
removelist.append(index1)
else:
removelist.append(index2)
except:
print('Problem with duplicate removal of ', index1, index2)
thismult=thismult[-thismult.index.isin(removelist)]
print (len(removelist), ' duplicated energy values removed from multiplex')
return thismult
def get_spectral_regs(self):
''' Gets energy range and info about spectral regions that comprise the underlying multiplex spectra
run once on first spectra? '''
print('Loading spectral regions.')
if self.pixarray.empty:
print('QM pixarray file must be loaded')
return
AugerFileName=self.pixarray.iloc[0]['Filename']
filenumber=AugerFileName.split('.')[1].split('.')[0]
# check for file in cwd or sub
if not os.path.exists(self.directory+'/'+AugerFileName):
if not os.path.exists(self.directory+'/sub/'+AugerFileName): # check sub directory
print('Datafile ', AugerFileName,' not found in cwd or sub directories.')
else:
AugerFileName='/sub/'+AugerFileName
if not os.path.exists(self.directory+'/'+AugerFileName):
print('Failed load of spectral regs... first data file not found')
return
with open(self.directory+'/'+AugerFileName, 'rb') as file:
filedata = file.read()
end=filedata.find(b'EOFH')
headerdata=filedata[0:end+6] # works to cut off binary part
header=headerdata.decode(encoding='cp437') # more generic encoding than utf-8
# get number of cycles and time per step from header
tempstring=header.split('NumCycles: ')[1] # find # cycles (works for either survey or multiplex)
match=re.search(r'\d+',tempstring)
if match:
numcycles=int(match.group(0))
tempstring=header.split('TimePerStep:')[1] # find time per step
match=re.search(r'\d+\.\d+',tempstring)
if match:
timestep=float(match.group(0))
# Spectral image data cube signal always sorted by energy (in get_multiplex_data)
# need to auto-sort here as well
tempstring=header.split('NoSpectralReg: ')[1] # unlike NoSpectralRegFull inactives are already removed
match=re.search(r'\d+',tempstring)
numdefregions=int(match.group(0)) # number of defined regions (can be larger than # active regions)
self.spectralregs=pd.DataFrame(columns=['Filenumber','Filename','Numcycles','Timestep','Element','Sweeps','Evstep','Lower','Upper','Time'])
numregions=0 # active regions (as opposed to nominal defined regions)
timeperarea=0 #initialize as zero
self.energy=[] # list for energy x values
self.evbreaks=[0] # region boundaries needed for smoothdiff include first
for i in range(numdefregions):
tempstr=tempstring.split('SpectralRegDef: ')[i+1] # should work to split
element=(tempstr.split(' ')[2]) # name of elemental line
numpts=int(tempstr.split(' ')[4])
evstep=float(tempstr.split(' ')[5]) #eV/step
startev=float(tempstr.split(' ')[6]) # starting eV
endev=float(tempstr.split(' ')[7]) # ending eV
for j in range(0,numpts): # generate energy values for survey
self.energy.append(startev+evstep*j) # cannot remove energy duplicates yet
self.evbreaks.append(len(self.energy)-1) # gives indices of discontinuities in energy scan (needed for smoothdiffS7D7)
tempstr=tempstr.split('SpectralRegDef2: ')[1]
sweeps=int(tempstr.split(' ')[2]) # number of sweeps through element
time=numcycles*timestep*sweeps*(endev-startev)/1000 # acquisition time in seconds for this multiplex region
timeperarea+=time # add on acquisition time for this element eV range only
# add ith new data row to spatialareas dataframe
# make a list and then append as row?
self.spectralregs.loc[i]=[filenumber, AugerFileName, numcycles, timestep, element, sweeps, evstep, startev, endev, time]
numregions+=1
self.spectralregs=self.spectralregs.sort_values(['Lower'])
self.spectralregs=self.spectralregs.reset_index(drop=True)
# make indexrange showing Ev range of elements as index #s with specimage
self.spectralregs['Start']=np.nan
self.spectralregs['End']=np.nan
# Need to remove overlapping vals from spectralregs (keep one w/ large # of sweeps)
# These duplicates are removed from multiplex spectra and energy elsewhere
for i in range(0, len(self.spectralregs)-1):
if self.spectralregs.iloc[i+1]['Lower']<=self.spectralregs.iloc[i]['Upper']:
redval=self.spectralregs.iloc[i]['Upper']-self.spectralregs.iloc[i+1]['Lower']+1
# remove overlapping vals from inferior one
if self.spectralregs.iloc[i+1]['Sweeps']>self.spectralregs.iloc[i]['Sweeps']:
self.spectralregs=self.spectralregs.set_value(i+1,'Lower',self.spectralregs.iloc[i+1]['Lower']+redval)
else:
self.spectralregs=self.spectralregs.set_value(i,'Upper',self.spectralregs.iloc[i]['Upper']-redval)
count=0
for index, row in self.spectralregs.iterrows():
thisrange=row.Upper-row.Lower+1
# zero based indexing
self.spectralregs=self.spectralregs.set_value(index, 'Start', count)
self.spectralregs=self.spectralregs.set_value(index, 'End', count+thisrange-1)
count+=thisrange
# Rename certain peaks/elem to align peak naming conventions (generally main peak w/o appended number)
peakdict={'Mg2':'Mg','Si2':'Si','S1':'S','Fe3':'Fe'}
for key, val in peakdict.items():
self.spectralregs['Element']=self.spectralregs['Element'].str.replace(key, val)
self.energy.sort() # extracted multiplex spectra always sorted by increasing eV
''' Get rid of accidental duplicated energy vals .. keep_best_values removes
these from multiplex spectra themselves
'''
print('Spectral regs loaded.',str(len(self.energy)), ' energy values.')
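    # Worked illustration of the bookkeeping above: for two active regions of 101 and
    # 81 points, the loop leaves evbreaks == [0, 100, 181], i.e. the self.energy indices
    # where one multiplex region ends and the next begins (used later by S7D7 smoothing).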
def get_elemdata(self, **kwargs):
''' Return index ranges for each element's peak, low background, and high background
these are sometimes needed separate from spectralregs
Elemdata contains:
0) elem/peak name
1) order in original scan
2) indices of peak range (list)
3) indices of low back range (list) 4) indices of high back range (list)
5) energy vals of peak range (list) 6) energy vals of low back range (list)
7) energy vals of high back range (list)
8) energy of ideal direct peak (float; set to nan if not found)
9) corresponding index of ideal peak (nan if not within data range).. this is possible
if significant peak shift is occurring and predefined shift can be built in to
QM multiplex
10) ideal negpeak ev (position of negative peak in smooth-diff deriv s7d7)..
slightly higher than idealev (9 in list)
11) typical integration peak width
12) chargeshift -- # of eV of charge-compensation shift applied to scan
figure out by comparison of AESquantparams w/ scan center
13) peakwidth - element specific negpeak-pospeak
        14) searchwidth - half-width (in eV) of the window searched around the expected
            pospeak position
        15) kfactor - sensitivity factor for the smooth-diff (deriv) quant
        16) kfactor2 - sensitivity factor for the direct-integration quant
        17) mass - atomic mass used for quant
kwarg: Elements - explicit element list (normally autogenerated)...
only needed if multiplex setup is unusual
'''
print('Loading element data.')
if self.spectralregs.empty or self.aesquantparams.empty:
print('Cannot load element data... extract spectral regs from multiplex and load AESquantparams!')
return
self.elemdata=[] # list of lists with important element data
self.elements=[]
# determine if spectral regs are split into OL, O, OH or not
Elements=np.ndarray.tolist(self.spectralregs.Element.unique())
if self.spectralregs.Element[0].endswith('L'):
# works only with OL, O, OH type setup (not continuous single O region)
Elements=Elements[1::3]
# if not this structure then above Elements list should be correct
validelem=np.ndarray.tolist(self.aesquantparams['element'].unique())
missing=[elem for elem in Elements if elem not in validelem]
if len(missing)!=0:
print(','.join(missing),' are not valid elements/peaks in AESquantparams')
self.elements=Elements
print('Elements are',','.join(Elements))
# Ensure all are standard peak names
for i, elem in enumerate(Elements):
thispeak=self.spectralregs[self.spectralregs['Element']==elem]
lowback=self.spectralregs[self.spectralregs['Element']==elem+'L']
hiback=self.spectralregs[self.spectralregs['Element']==elem+'H']
match=self.aesquantparams[self.aesquantparams['element']==elem]
            if len(match) != 1 or len(thispeak) != 1:
print('Element', elem,' not found in AESquantparams or associated peak missing')
continue
idealev=int(match.iloc[0]['negpeak']+match.iloc[0]['integpeak'])
idealnegpeak=int(match.iloc[0]['negpeak'])
width=int(match.iloc[0]['integwidth'])
# typical distance between neg and pospeaks in smooth-diff data
peakwidth=int(match.iloc[0]['peakwidth'])
searchwidth=int(match.iloc[0]['searchwidth'])
kfact=int(match.iloc[0]['kfactor']) # kfactor associated with smooth-diff method
kfact2=int(match.iloc[0]['kfactor2'])
mass=int(match.iloc[0]['mass'])
# Find associated index value in this dataset
peakrange=[j for j in range(int(thispeak.iloc[0]['Lower']),int(thispeak.iloc[0]['Upper']))]
minrange=int(thispeak.iloc[0]['Start'])
maxrange=int(thispeak.iloc[0]['End'])
minevrange=float(thispeak.iloc[0]['Lower'])
maxevrange=float(thispeak.iloc[0]['Upper'])
# Some QM scans have built in charging compensation
chargeshift=int((minevrange + maxevrange)/2 - idealev)
try:
val=peakrange.index(idealev)
indrange=[j for j in range(int(thispeak.iloc[0]['Start']),int(thispeak.iloc[0]['End']))]
idealind=indrange[val]
except:
idealind=np.nan # ideal peak not in range (due to chargeshift)
            if len(lowback) == 1 and len(hiback) == 1:
minlow=int(lowback.iloc[0]['Start'])
maxlow=int(lowback.iloc[0]['End'])
minhigh=int(hiback.iloc[0]['Start'])
maxhigh=int(hiback.iloc[0]['End'])
minevlow=float(lowback.iloc[0]['Lower'])
maxevlow=float(lowback.iloc[0]['Upper'])
minevhigh=float(hiback.iloc[0]['Lower'])
maxevhigh=float(hiback.iloc[0]['Upper'])
else: # handle continuous scan case
# Artificially split single peak region into low, peak, high
# Set index numbers
fullrange=maxrange-minrange
minlow=minrange
maxlow=int(minrange+fullrange*0.25)
minhigh=int(maxrange-fullrange*0.25)
maxhigh=maxrange
minrange=maxlow+1
maxrange=minhigh-1
# Set these values in eV
fullrange=maxevrange-minevrange
minevlow=minevrange
maxevlow=int(minevrange+fullrange*0.25)
minevhigh=int(maxevrange-fullrange*0.25)
maxevhigh=maxevrange
minevrange=maxevlow+1
maxevrange=minevhigh-1
self.elemdata.append([elem, i, [minrange, maxrange], [minlow, maxlow], [minhigh, maxhigh],
[minevrange, maxevrange], [minevlow, maxevlow], [minevhigh, maxevhigh], idealev, idealind,
idealnegpeak, width, chargeshift, peakwidth, searchwidth, kfact, kfact2, mass])
print('Elem data loaded with ',str(len(self.elemdata)), ' regions.')
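    # Example of a single elemdata entry (all numbers are illustrative, not real
    # calibration values), in the order appended above:
    #   ['O', 1, [210, 310], [110, 209], [311, 410], [480.0, 530.0], [430.0, 479.0],
    #    [531.0, 580.0], 508, 238, 510, 12, 0, 9, 5, 1, 1, 16]
    # i.e. peak name, scan order, peak/low/high index ranges, peak/low/high eV ranges,
    # ideal direct-peak eV and its index, ideal negpeak eV, integration width,
    # charge shift, deriv peak width, search width, kfactor, kfactor2, mass.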
def get_multiplex_data(self, AugerFileName, numareas):
''' Extracts multiplex spectra from all spatial areas within single spe multiplex file
also uses energy and evbreaks attribs '''
if not os.path.exists(self.directory+'/'+AugerFileName):
if os.path.exists(self.directory+'/sub/'+AugerFileName):
AugerFileName='sub/'+AugerFileName # open from sub directory
try:
with open(self.directory+'/'+AugerFileName, 'rb') as file:
filedata = file.read()
except:
print(AugerFileName," missing from data directory")
return pd.DataFrame() # return empty frame?
end=filedata.find(b'EOFH')
bindata=filedata[end+6:] # binary data section of file (header removed)
mystruct=struct.Struct('f')
# binary header of variable length.. just count backward using known data length
startbyte=len(bindata)-4*numareas*len(self.energy)
        # TEST for correct byte location... the data region begins right after the last occurrence of triple-zero 4-byte float values (for surveys usually in bytes 100:111)
for i in range(startbyte-12,startbyte,4):
byteval=bindata[i:i+4]
unpackbyte=mystruct.unpack(byteval) # little endian encoding
if unpackbyte[0]!=0:
print('Possible binary read error... leading zeros are missing for ',AugerFileName)
# create data frame for multiplex and assign energy values
multiplex=pd.DataFrame() # data frame for energy, and counts col for each area
multiplex['Energy']=self.energy # energy values found in SpectralRegions and passed as argument
# Read in and convert all numeric values
alldata=[] # single list for all Auger counts values in file
for i in range(startbyte,len(bindata),4):
byteval=bindata[i:i+4]
unpackbyte=mystruct.unpack(byteval)
alldata.append(unpackbyte[0])
if len(alldata)!=len(self.energy)*numareas: # number of data values expected for each area
print('Possible binary data reading error: Data string length does not match expected value for ',AugerFileName)
''' Multiplex file structure has same energy region of all areas bundled together
(split counts into spectral regions based on evbreaks)so organization is
multiplex spectral region 1 (all areas), spectral region 2 (all areas), etc.
'''
datachunks=[]
for i in range(0,len(self.evbreaks)-1):
datachunks.append(self.evbreaks[i+1]-self.evbreaks[i])
datachunks[0]=datachunks[0]+1 # adjust for length of first regions
datachunks=[i*numareas for i in datachunks] # total lengths of each spectral region
databoundaries=[]
for i,val in enumerate(datachunks):
temp=datachunks[0:i]
databoundaries.append(sum(temp))
databoundaries.append(sum(datachunks))
specregs=[] # list of lists containing counts values for each spectral region
# now split data into single spectral region with all spatial areas
for i in range(0,len(databoundaries)-1):
specregs.append(alldata[databoundaries[i]:databoundaries[i+1]])
# Now construct counts for each area
counts=[[] for x in range(0,numareas)] # list of empty lists one for each spatial area
for i,vals in enumerate(specregs):
# vals is list of count values for this spectral region (for each spatial area back to back)
numvals=int(datachunks[i]/numareas) # number of values in single area/single spectral region
for j in range(0, numareas):
counts[j].extend(vals[j*numvals:(j+1)*numvals]) # gets all counts columns
for i in range(1,numareas+1):
            cntsname='Counts'+str(i) # column names run Counts1, Counts2, ... (1-based)
multiplex[cntsname]=counts[i-1] # assign counts to frame (and switch from 0 based indexing)
# Solve multiplex out of order problem
if not multiplex.Energy.is_monotonic: # energy values out of order problem
multiplex=multiplex.sort_values(['Energy']) # sort before returning
return multiplex
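    # The frame returned above carries one 'Energy' column plus 'Counts1'..'CountsN'
    # (one per spatial area), already sorted by energy; e.g. multiplex[['Energy','Counts3']]
    # is the spectrum of the third area stored in the spe file.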
def link_files(self):
        ''' Create links between pix array and the underlying spe data files (run before assembling the spectral image) '''
# Prompt to get first spe file of this quantmap
fullpath= filedialog.askopenfilename(title='Open first spe file of quantmap',
filetypes=[("spectrum","*.spe")])
(directory, filename)=os.path.split(fullpath)
basename=filename.split('.')[0]
startnum=int(filename.split('.')[1])
print('First file of quantmap is', basename, ' ', str(startnum))
self.pixarray['Filename']='' # initialize new string column
for index,row in self.pixarray.iterrows():
self.pixarray=self.pixarray.set_value(index, 'Filename',
basename+'.'+str(startnum+index//20)+'.spe')
# Ensure that expected spe data files are present
self.check_missing_spe(directory)
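    # Example of the naming produced above (file names are hypothetical): with basename
    # 'QM1' and startnum 101, pixels 0-19 map to 'QM1.101.spe', pixels 20-39 to
    # 'QM1.102.spe', and so on -- 20 spatial areas per spe file.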
def check_missing_spe(self, directory):
''' If link files is invoked, check to ensure all spe files exist in
proper directory '''
spefiles=np.ndarray.tolist(self.pixarray.Filename.unique())
        datafiles=glob.glob(directory+'\\*.spe')+glob.glob(directory+'\\sub\\*.spe')
        # glob returns full paths; reduce to bare filenames so they compare against pixarray entries
        datafiles=[os.path.basename(f) for f in datafiles]
print(len(datafiles), 'files found')
missing=[f for f in spefiles if f not in datafiles]
if len(missing)!=0:
try:
fnums=[int(i.split('.')[1].split('.')[0]) for i in missing]
franges=[] # ranges of files for missing file output
# TODO FIX this groupby can have int, str datatype problems
for key, group in groupby(enumerate(fnums), lambda x: x[0]-x[1]):
thisgroup=list(map(itemgetter(1), group))
if len(thisgroup)>1:
# more than one consecutive so group as min-max in frange
franges.append(str(min(thisgroup))+'-'+ str(max(thisgroup)))
else:
franges.append(str(thisgroup[0])) # single non-consecutive filenumber
print('Filenumbers ',', '.join(franges),' are missing from data directory.')
except:
print('Filenumbers', ', '.join(missing),' are missing from data directory.')
def find_all_peaks(self):
''' Find charging shift associated w/ some large peak (e.g. O) for all pixels
return charge shift and peak amplitude (measure of significance)
works if scan regions are sufficiently large to allow smooth-diff peak
calcshifts performs similar function on direct peaks (but needs mod for big shifts)
peakind- index # of this peak in np Z direction; peakrange - associated energy range
lowind, highind, lowrange, hirange - above and below background regions
Params saved:
amplmap deriv-related (mostly for spectral plotting)
[0] ampl and [1] associated negpeak value [2] energy val (not index)
and smdiff peak width (#ev to popspeak)
integmap:
[0] best integcounts value and [1] assoc energy val
[2] slope and [3] intercept of background fit
shift map
counts max at peak position
'''
numpeaks=3 # max number of negpeaks in deriv spectrum to evaluate
# Two shift values (deriv and direct peaks) for each element
self.shiftmaps=[]
''' Parameters for s7d7 derivative peaks: [0] ampl [1] negval [2] negpeakind
[3] width (can get pospeak index and val indirectly for use in spectral plots)
'''
self.amplmaps=[]
''' Parameters for direct integrations: [0] integcnts [1] peak index (eV value
available indirectly through shiftmaps) [2] countsmax (unsubtracted int at peak)
[3] slope [4] intercept
'''
self.integmaps=[]
for i, [elem, order, peakind, lowind, highind, peakrange, lowrange, hirange,
idealev, idealind, idealnegpeak, integwidth, chargeshift, peakwidth,
searchwidth, kfact, kfact2, mass] in enumerate(self.elemdata):
print('Extracting maps for element', elem)
if str(lowind[0])=='nan' and str(highind[0])=='nan':
lowind, lowrange, highind, hirange, peakind, peakrange= self.fixoddsetup(peakind, peakrange)
shiftmap=np.empty([self.specimage.shape[0],self.specimage.shape[1], 2])
amplmap=np.empty([self.specimage.shape[0],self.specimage.shape[1], 4])
integmap=np.empty([self.specimage.shape[0],self.specimage.shape[1], 5])
for X in range(0,self.specimage.shape[0]):
for Y in range(0,self.specimage.shape[1]):
# Get raw data associated w/ bigpeak (whole region not just peak subset)
rawdata=self.specimage[X,Y,lowind[0]:highind[1]+1]
if rawdata.max()<0.0001: # data missing.. only zeros for counts
shiftmap[X,Y]=np.nan
amplmap[X,Y]=np.nan
continue
s7d7=self.calcderiv(rawdata, peakind, lowind, highind, peakrange, lowrange, hirange)
# Find all relative minima
foundind=argrelmin(s7d7) # scipy.stat returned as tuple
mypeaks=pd.DataFrame()
thisenergy=np.arange(lowrange[0],hirange[1]+1,1.0)
mypeaks['Energy']=thisenergy[foundind]
mypeaks['negpeakval']=s7d7[foundind]
mypeaks['negpeakind']=foundind[0] # relative index (original position lost)
# Find associated pospeaks
# row=mypeaks.loc[0]
for index, row in mypeaks.iterrows():
# searching within s7d7 (index zeroed)
lowlim=int(row.negpeakind-peakwidth-searchwidth) # again indices relative to lowind[0]
uplim=int(row.negpeakind - peakwidth + searchwidth + 1)
if lowlim<0:
lowlim=0
if uplim>len(s7d7)-1:
uplim=len(s7d7)
try:
pospeakval=s7d7[lowlim:uplim].max()
# index in terms of original s7d7 dataset
pospeakind=np.unravel_index(s7d7[lowlim:uplim].argmax(), s7d7.shape)[0]+lowlim
# now calculate amplitude and s7d7 peak width
                            mypeaks=mypeaks.set_value(index,'Ampl',pospeakval-row.negpeakval)
# peakwidth = negpeak- pospeak
mypeaks=mypeaks.set_value(index,'Peakwidth',row.negpeakind-pospeakind)
except:
print('No s7d7 amplitude for', elem,'peak', str(index), 'of pixel', str(X),str(Y))
# Can consider amplitude, peakwidth and position when looking for real peaks
mypeaks=mypeaks.sort_values(['Ampl'], ascending=False).head(numpeaks)
mypeaks=mypeaks.reset_index(drop=True) # necessary?
# DIRECT peak check.. for top n peaks, also check for direct peak maxima (no background yet)
                    peakdata = pd.DataFrame()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import git
import math
import os
import json
from sklearn import neighbors
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression, Ridge,Lasso
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import pacf
from pandas.plotting import autocorrelation_plot
from datetime import datetime
from urllib.request import urlopen
from tqdm.auto import tqdm
import lightgbm as lgb
import statsmodels.tsa.stattools as ts
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
# gets list of all fips numbers
def get_fips():
Y = pd.read_csv(f"{homedir}/data/us/covid/deaths.csv")
fips_list = Y.countyFIPS.values
    fips_list = fips_list[fips_list != 1] # drop placeholder FIPS values
    fips_list = fips_list[fips_list != 0] # present in the raw data
return set(fips_list)
def get_date(datestr, formatstr='%Y-%m-%d'):
return datetime.strptime(datestr, formatstr)
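# Example: get_date('2020-03-15') returns datetime(2020, 3, 15); pass formatstr for
# non-ISO date strings.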
class CumDeathCounter():
def __init__(self):
self.cum_deaths = pd.read_csv(f"{homedir}/data/us/covid/deaths.csv")
self.cum_deaths = self.cum_deaths.iloc[1:]
fips_list = self.cum_deaths.countyFIPS.values
        fips_list = fips_list[fips_list != 1] # drop placeholder FIPS values
        fips_list = fips_list[fips_list != 0] # present in the raw data
self.cache = {}
for fips in fips_list:
self.cache[fips] = self.get_cum_deaths(fips)
def get_cum_deaths(self, fips, clip_zeros=False):
idx = self.cum_deaths.index[self.cum_deaths['countyFIPS'] == fips].values[0]
county_deaths = self.cum_deaths.loc[self.cum_deaths['countyFIPS'] == fips]
dates = pd.to_datetime(county_deaths.columns[4:].values).map(lambda dt : str(dt))
X = np.array([(get_date(d[:10]) - get_date('2020-01-01')).days for d in dates])
y = []
for i in range(4, len(county_deaths.columns)):
y.append(county_deaths.loc[idx,county_deaths.columns[i]])
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
return X[i:], y[i:]
def getY(self, fips):
return self.cache[fips]
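# Usage sketch (6037 is just an example FIPS code): X, y = CumDeathCounter().getY(6037)
# gives days since 2020-01-01 and the matching cumulative death counts for that county.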
class CumCaseCounter():
def __init__(self):
self.cum_cases = pd.read_csv(f"{homedir}/data/us/covid/confirmed_cases.csv")
self.cum_cases = self.cum_cases.iloc[1:]
self.cum_cases = self.cum_cases.iloc[:, :-1]
fips_list = self.cum_cases.countyFIPS.values
        fips_list = fips_list[fips_list != 1] # drop placeholder FIPS values
        fips_list = fips_list[fips_list != 0] # present in the raw data
self.cache = {}
for fips in fips_list:
self.cache[fips] = self.get_cum_cases(fips)
def get_cum_cases(self, fips,clip_zeros=False):
idx = self.cum_cases.index[self.cum_cases['countyFIPS'] == fips].values[0]
county_cases = self.cum_cases.loc[self.cum_cases['countyFIPS'] == fips]
dates = pd.to_datetime(county_cases.columns[4:].values).map(lambda dt : str(dt))
X = np.array([(get_date(d[:10]) - get_date('2020-01-01')).days for d in dates])
y = []
for i in range(4, len(county_cases.columns)):
y.append(county_cases.loc[idx,county_cases.columns[i]])
if not clip_zeros:
return X, y
for i in range(len(y)):
if y[i] != 0:
return X[i:], y[i:]
def getY(self, fips):
return self.cache[fips]
class DeltaDeathCounter():
def __init__(self):
        self.df = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_counties_daily.csv")
import numpy as np
import pandas as pd
def GetDataset(name, base_path):
""" Load a dataset
Parameters
----------
name : string, dataset name
base_path : string, e.g. "path/to/datasets/directory/"
Returns
-------
X : features (nXp)
y : labels (n)
"""
if name=="meps_19":
df = pd.read_csv(base_path + 'meps_19_reg_fix.csv')
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names!=response_name]
column_names = column_names[column_names!="Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
y = df[response_name].values
X = df[col_names].values
if name=="meps_20":
df = pd.read_csv(base_path + 'meps_20_reg_fix.csv')
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names!=response_name]
column_names = column_names[column_names!="Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
y = df[response_name].values
X = df[col_names].values
if name=="meps_21":
df = pd.read_csv(base_path + 'meps_21_reg_fix.csv')
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names!=response_name]
column_names = column_names[column_names!="Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT16F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
y = df[response_name].values
X = df[col_names].values
if name=="star":
df = pd.read_csv(base_path + 'STAR.csv')
df.loc[df['gender'] == 'female', 'gender'] = 0
df.loc[df['gender'] == 'male', 'gender'] = 1
df.loc[df['ethnicity'] == 'cauc', 'ethnicity'] = 0
df.loc[df['ethnicity'] == 'afam', 'ethnicity'] = 1
df.loc[df['ethnicity'] == 'asian', 'ethnicity'] = 2
df.loc[df['ethnicity'] == 'hispanic', 'ethnicity'] = 3
df.loc[df['ethnicity'] == 'amindian', 'ethnicity'] = 4
df.loc[df['ethnicity'] == 'other', 'ethnicity'] = 5
df.loc[df['stark'] == 'regular', 'stark'] = 0
df.loc[df['stark'] == 'small', 'stark'] = 1
df.loc[df['stark'] == 'regular+aide', 'stark'] = 2
df.loc[df['star1'] == 'regular', 'star1'] = 0
df.loc[df['star1'] == 'small', 'star1'] = 1
df.loc[df['star1'] == 'regular+aide', 'star1'] = 2
df.loc[df['star2'] == 'regular', 'star2'] = 0
df.loc[df['star2'] == 'small', 'star2'] = 1
df.loc[df['star2'] == 'regular+aide', 'star2'] = 2
df.loc[df['star3'] == 'regular', 'star3'] = 0
df.loc[df['star3'] == 'small', 'star3'] = 1
df.loc[df['star3'] == 'regular+aide', 'star3'] = 2
df.loc[df['lunchk'] == 'free', 'lunchk'] = 0
df.loc[df['lunchk'] == 'non-free', 'lunchk'] = 1
df.loc[df['lunch1'] == 'free', 'lunch1'] = 0
df.loc[df['lunch1'] == 'non-free', 'lunch1'] = 1
df.loc[df['lunch2'] == 'free', 'lunch2'] = 0
df.loc[df['lunch2'] == 'non-free', 'lunch2'] = 1
df.loc[df['lunch3'] == 'free', 'lunch3'] = 0
df.loc[df['lunch3'] == 'non-free', 'lunch3'] = 1
df.loc[df['schoolk'] == 'inner-city', 'schoolk'] = 0
df.loc[df['schoolk'] == 'suburban', 'schoolk'] = 1
df.loc[df['schoolk'] == 'rural', 'schoolk'] = 2
df.loc[df['schoolk'] == 'urban', 'schoolk'] = 3
df.loc[df['school1'] == 'inner-city', 'school1'] = 0
df.loc[df['school1'] == 'suburban', 'school1'] = 1
df.loc[df['school1'] == 'rural', 'school1'] = 2
df.loc[df['school1'] == 'urban', 'school1'] = 3
df.loc[df['school2'] == 'inner-city', 'school2'] = 0
df.loc[df['school2'] == 'suburban', 'school2'] = 1
df.loc[df['school2'] == 'rural', 'school2'] = 2
df.loc[df['school2'] == 'urban', 'school2'] = 3
df.loc[df['school3'] == 'inner-city', 'school3'] = 0
df.loc[df['school3'] == 'suburban', 'school3'] = 1
df.loc[df['school3'] == 'rural', 'school3'] = 2
df.loc[df['school3'] == 'urban', 'school3'] = 3
df.loc[df['degreek'] == 'bachelor', 'degreek'] = 0
df.loc[df['degreek'] == 'master', 'degreek'] = 1
df.loc[df['degreek'] == 'specialist', 'degreek'] = 2
df.loc[df['degreek'] == 'master+', 'degreek'] = 3
df.loc[df['degree1'] == 'bachelor', 'degree1'] = 0
df.loc[df['degree1'] == 'master', 'degree1'] = 1
df.loc[df['degree1'] == 'specialist', 'degree1'] = 2
df.loc[df['degree1'] == 'phd', 'degree1'] = 3
df.loc[df['degree2'] == 'bachelor', 'degree2'] = 0
df.loc[df['degree2'] == 'master', 'degree2'] = 1
df.loc[df['degree2'] == 'specialist', 'degree2'] = 2
df.loc[df['degree2'] == 'phd', 'degree2'] = 3
df.loc[df['degree3'] == 'bachelor', 'degree3'] = 0
df.loc[df['degree3'] == 'master', 'degree3'] = 1
df.loc[df['degree3'] == 'specialist', 'degree3'] = 2
df.loc[df['degree3'] == 'phd', 'degree3'] = 3
df.loc[df['ladderk'] == 'level1', 'ladderk'] = 0
df.loc[df['ladderk'] == 'level2', 'ladderk'] = 1
df.loc[df['ladderk'] == 'level3', 'ladderk'] = 2
df.loc[df['ladderk'] == 'apprentice', 'ladderk'] = 3
df.loc[df['ladderk'] == 'probation', 'ladderk'] = 4
df.loc[df['ladderk'] == 'pending', 'ladderk'] = 5
df.loc[df['ladderk'] == 'notladder', 'ladderk'] = 6
df.loc[df['ladder1'] == 'level1', 'ladder1'] = 0
df.loc[df['ladder1'] == 'level2', 'ladder1'] = 1
df.loc[df['ladder1'] == 'level3', 'ladder1'] = 2
df.loc[df['ladder1'] == 'apprentice', 'ladder1'] = 3
df.loc[df['ladder1'] == 'probation', 'ladder1'] = 4
df.loc[df['ladder1'] == 'noladder', 'ladder1'] = 5
df.loc[df['ladder1'] == 'notladder', 'ladder1'] = 6
df.loc[df['ladder2'] == 'level1', 'ladder2'] = 0
df.loc[df['ladder2'] == 'level2', 'ladder2'] = 1
df.loc[df['ladder2'] == 'level3', 'ladder2'] = 2
df.loc[df['ladder2'] == 'apprentice', 'ladder2'] = 3
df.loc[df['ladder2'] == 'probation', 'ladder2'] = 4
df.loc[df['ladder2'] == 'noladder', 'ladder2'] = 5
df.loc[df['ladder2'] == 'notladder', 'ladder2'] = 6
df.loc[df['ladder3'] == 'level1', 'ladder3'] = 0
df.loc[df['ladder3'] == 'level2', 'ladder3'] = 1
df.loc[df['ladder3'] == 'level3', 'ladder3'] = 2
df.loc[df['ladder3'] == 'apprentice', 'ladder3'] = 3
df.loc[df['ladder3'] == 'probation', 'ladder3'] = 4
df.loc[df['ladder3'] == 'noladder', 'ladder3'] = 5
df.loc[df['ladder3'] == 'notladder', 'ladder3'] = 6
df.loc[df['tethnicityk'] == 'cauc', 'tethnicityk'] = 0
df.loc[df['tethnicityk'] == 'afam', 'tethnicityk'] = 1
df.loc[df['tethnicity1'] == 'cauc', 'tethnicity1'] = 0
df.loc[df['tethnicity1'] == 'afam', 'tethnicity1'] = 1
df.loc[df['tethnicity2'] == 'cauc', 'tethnicity2'] = 0
df.loc[df['tethnicity2'] == 'afam', 'tethnicity2'] = 1
df.loc[df['tethnicity3'] == 'cauc', 'tethnicity3'] = 0
df.loc[df['tethnicity3'] == 'afam', 'tethnicity3'] = 1
df.loc[df['tethnicity3'] == 'asian', 'tethnicity3'] = 2
df = df.dropna()
grade = df["readk"] + df["read1"] + df["read2"] + df["read3"]
grade += df["mathk"] + df["math1"] + df["math2"] + df["math3"]
names = df.columns
target_names = names[8:16]
data_names = np.concatenate((names[0:8],names[17:]))
X = df.loc[:, data_names].values
y = grade.values
if name=="facebook_1":
df = pd.read_csv(base_path + 'facebook/Features_Variant_1.csv')
y = df.iloc[:,53].values
X = df.iloc[:,0:53].values
if name=="facebook_2":
df = pd.read_csv(base_path + 'facebook/Features_Variant_2.csv')
y = df.iloc[:,53].values
X = df.iloc[:,0:53].values
if name=="bio":
#https://github.com/joefavergel/TertiaryPhysicochemicalProperties/blob/master/RMSD-ProteinTertiaryStructures.ipynb
df = pd.read_csv(base_path + 'CASP.csv')
y = df.iloc[:,0].values
X = df.iloc[:,1:].values
if name=='blog_data':
# https://github.com/xinbinhuang/feature-selection_blogfeedback
df = pd.read_csv(base_path + 'blogData_train.csv', header=None)
X = df.iloc[:,0:280].values
y = df.iloc[:,-1].values
if name == "concrete":
dataset = np.loadtxt(open(base_path + 'Concrete_Data.csv', "rb"), delimiter=",", skiprows=1)
X = dataset[:, :-1]
y = dataset[:, -1:]
if name=="bike":
# https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
df=pd.read_csv(base_path + 'bike_train.csv')
# # seperating season as per values. this is bcoz this will enhance features.
season=pd.get_dummies(df['season'],prefix='season')
df=pd.concat([df,season],axis=1)
# # # same for weather. this is bcoz this will enhance features.
weather=pd.get_dummies(df['weather'],prefix='weather')
df=pd.concat([df,weather],axis=1)
# # # now can drop weather and season.
df.drop(['season','weather'],inplace=True,axis=1)
df.head()
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
        df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
import pandas as pd
import sparse
import numpy as np
class AnnotationData:
"""
Contains all the segmentation and assignment data
WARNING: self.assignments['Clusternames'] will contain neurite ids (as strings) rather than names
"""
# Todo: if we can preserve segments instead of merging them when two segs are one same neuron, that would help
# (make possible) the classification
# TODO: what happens to features when neurons/segs are reassigned? features go rotten because the segment key is unchanged
def __init__(self, stem_savefile, frame_shape: tuple = (512, 512, 35)): # Todo: is it right to have a default value here?
"""
Initialize the class for segments and assignments
:param stem_savefile: The stem name for the files in which to save assignments and segments
:param frame_shape: the shape of the numpy array of any frame of the video
"""
self._normal_seg_file = stem_savefile + "_segmented.csv"
self._coarse_seg_file = stem_savefile + "_highthresh_segmented.csv"
self.assignment_file = stem_savefile + "_assignment.csv"
try:
self._normal_data_frame = pd.read_csv(self._normal_seg_file)
except FileNotFoundError:
self._normal_data_frame = pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
try:
self._coarse_data_frame = pd.read_csv(self._coarse_seg_file)
except FileNotFoundError:
self._coarse_data_frame = pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
# whether to deal with coarse segmentation:
self.coarse_seg_mode = False
self.shape = frame_shape
try:
self.assignments = pd.read_csv(self.assignment_file)
except FileNotFoundError:
self.assignments = pd.DataFrame({"Time": [], "Segment": [], "Clusternames": []}, dtype=int)
self.new_format()
@property
def data_frame(self):
if self.coarse_seg_mode:
return self._coarse_data_frame
else:
return self._normal_data_frame
@data_frame.setter
def data_frame(self, value):
if self.coarse_seg_mode:
self._coarse_data_frame = value
else:
self._normal_data_frame = value
@property
def seg_file(self):
if self.coarse_seg_mode:
return self._coarse_seg_file
else:
return self._normal_seg_file
def new_format(self):
"""
This is for backwards compatibility. Clusternames used to be names, now we want them to be int identifiers.
Here we map all clusternames to unique ints.
"""
if not len(self.assignments):
return
if isinstance(self.assignments["Clusternames"][0], str): # no need to convert if they are already ints
d = {name: i+1 for i, name in enumerate(self.assignments["Clusternames"].unique())}
self.assignments["Clusternames"] = self.assignments["Clusternames"].map(d)
self.assignments = self.assignments.astype(int)
# Todo: more generally, constrain all values to int
def segmented_times(self):
"""
:return [t1, t2, t3, ...]: all times in this database
"""
return self.data_frame['Time'].unique()
def segmented_frame(self, t, coarse=None):
"""
Gets the segmented frame.
:param t: time frame
:param coarse: whether to return the coarse segmentation (if not provided, depends on current mode)
:return segmented: 3D numpy array with segmented[x,y,z] = segment_id, or 0 for background
"""
if coarse is True:
df = self._coarse_data_frame
elif coarse is False: # should not happen, but just to be complete
df = self._normal_data_frame
else:
df = self.data_frame
if t not in df.Time.values:
raise KeyError
segment_time = df[df['Time'] == t]
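# build a sparse 3D volume from the voxel coordinates, with the segment id as the value at each voxel, then densify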
frame = sparse.COO([segment_time.x, segment_time.y, segment_time.z], segment_time.Segment, shape=self.shape)
return frame.todense()
def get_segs_and_assignments(self, times):
"""
Returns list of all segments for given times and corresponding list of assigned neurites.
:param times: iterable of time frames for which to get segments
:return segments, neurites: segments = [(t1, s1), (t1, s2), ..., (t2, s), ...] list of segments for given frames
neurites = [n1, n2, ...] neurite assigned to each segment in segments
"""
assignments = self.assignments[self.assignments['Time'].isin(times)]
segments = assignments[['Time', 'Segment']].values
neurites = assignments['Clusternames'].to_list()
return segments, neurites
def get_mask(self, t=0, force_original=False):
"""
Gets the mask of neurons in time frame t.
:param t: time frame
:param force_original: True forces non-coarse segmentation
:return: mask (3D array of same shape as frame) with neuron id for pixel value (0 for background)
"""
if force_original:
df = self._normal_data_frame
else:
df = self.data_frame
if t not in df.Time.values or t not in self.assignments.Time.values: # Todo: make sure t not in self.assignments.Time is not needed, as that should never happen
raise KeyError
segs, neus = self.get_segs_and_assignments([t])
d = {seg[1]: neu for seg, neu in zip(segs, neus)}
d[0] = 0
segment_time = df[df['Time'] == t]
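# same construction as segmented_frame, but each voxel carries the neuron id obtained via the segment -> neuron map d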
frame = sparse.COO([segment_time.x, segment_time.y, segment_time.z], segment_time.Segment.map(d), shape=self.shape)
return frame.todense()
@property
def real_neurites(self):
"""List of neurites of interest (not background or noise)"""
clusternames = self.assignments['Clusternames'].unique().tolist()
# clusternames = [cln for cln in clusternames if cln.lower() != "noise"]
# Todo
return clusternames
@property
def nb_neurons(self):
return len(self.real_neurites)
@nb_neurons.setter
def nb_neurons(self, value):
# TODO
pass
# TODO: get rid of real_neurites?
# editing the data
def _save_mask(self, t, mask):
d = {(t, i): i for i in np.unique(mask) if i}
self.add_segmentation(t, mask)
self.assign(d)
def add_segmentation(self, t, segmented):
"""
Stores (or replaces if existing?) the segmentation for time t.
:param t: time frame
:param segmented: segmented frame (3D numpy array with segmented[x,y,z] = segment (0 if background)
"""
# Todo: maybe save to savefile sometimes
x, y, z = np.nonzero(segmented)
s = segmented[x, y, z]
df = pd.DataFrame({"Time": t, "Segment": s, "x": x, "y": y, "z": z})
self.data_frame = self.data_frame.append(df, ignore_index=True, sort=False)
self.data_frame.drop_duplicates(subset=("Time", "x", "y", "z"), keep="last", inplace=True) # todo: can we ever duplicate values??
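# keep="last" means a re-added segmentation overwrites any earlier row for the same (Time, x, y, z) voxel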
def assign(self, assignment_dict, update_nb_neurons=False):
if update_nb_neurons: # TODO
pass
# nb_neurons = max(assignment_dict.values())
# self.nb_neurons = nb_neurons
times, segs, cls = [], [], []
for key, val in assignment_dict.items():
times.append(key[0])
segs.append(key[1])
cls.append(val)
df = pd.DataFrame({"Time": times, "Segment": segs, "Clusternames": cls})
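# --- illustrative aside (not part of the original module) ---
# Minimal usage sketch of AnnotationData; the stem name and frame shape are assumptions,
# only methods shown in full above are exercised, and a pandas version that still
# provides DataFrame.append is assumed.
import numpy as np
ann = AnnotationData("demo_stem", frame_shape=(64, 64, 8))
seg = np.zeros((64, 64, 8), dtype=int)
seg[10:20, 10:20, 3] = 1                # a single segment with id 1 in frame t=0
ann.add_segmentation(0, seg)
print(ann.segmented_times())            # -> [0]
print(ann.segmented_frame(0).sum())     # -> 100 (100 voxels carrying segment id 1)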
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
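# Replacer (from testfixtures) swaps the named attribute for a Mock and restores the original in replace.restore()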
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
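# expected behaviour: when an RFR leg (SOFR/SONIA) is paired with an ibor leg, its designated maturity is snapped to the ibor leg's tenor; two ibor legs are left unchanged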
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
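# the quantity buckets below are standard US power delivery blocks: PEAK, 7X8 (overnight) and 2X16H (weekend/holiday daytime)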
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
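# like mock_forward_price above, but with the 7X8 bucket's M20 contract deliberately missing, to exercise incomplete-bucket handling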
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
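# a query for the intraday 'Last' measure gets a single recent point; anything else gets the three-day history below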
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
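# three historical points from the mock plus the intraday 'Last' point appended for today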
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
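# each malformed tenor or benchmark set below should raise MqValueError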
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
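# valid USD 3m vs 6m LIBOR basis swap: mock asset resolution and market data, then check the returned series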
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
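# repeat for a EUR asset with the ESTR benchmark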
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
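# annuity is reported as abs(raw value * 1e4 / 1e8), mirroring the scaling in the expected series below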
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
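# a pricing date that falls on an exchange holiday (Good Friday) is rejected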
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
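# rates keyed by termination tenor are re-indexed onto forward dates for the term structure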
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
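# basis swap rates keyed by termination tenor are re-indexed onto forward dates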
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
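# intraday 'Last' queries get a single latest point; otherwise return three days of daily history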
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
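# provides both 1y and 13m marks so the forward-starting var swap can be computed between them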
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
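# only the 1y tenor is returned, so the forward-starting calculation has no second leg and yields an empty result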
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
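# expected bucketed averages per ISO and bucket, computed from the mocked commod data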
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_forward_price():
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
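# expected forward marks per contract range and bucket from the mocked SPP forward curve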
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
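# '7x8' marks are missing from mock_missing_bucket_forward_price, so the 7x24 strip cannot be built and an empty series is expected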
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(), pd.Series(actual), check_names=False)
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_natgas_forward_price():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21-G21'))
expected = pd.Series([2.8629152542372878], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='F21-I21')
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='I21')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock,
price_method='GDD',
contract_range='F21')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_get_iso_data():
tz_map = {'MISO': 'US/Central', 'CAISO': 'US/Pacific'}
for key in tz_map:
assert (tm._get_iso_data(key)[0] == tz_map[key])
def test_string_to_date_interval():
assert (tm._string_to_date_interval("K20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("K20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("k20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("k20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("Cal22")['start_date'] == datetime.date(2022, 1, 1))
assert (tm._string_to_date_interval("Cal22")['end_date'] == datetime.date(2022, 12, 31))
assert (tm._string_to_date_interval("Cal2012")['start_date'] == datetime.date(2012, 1, 1))
assert (tm._string_to_date_interval("Cal2012")['end_date'] == datetime.date(2012, 12, 31))
assert (tm._string_to_date_interval("Cal53")['start_date'] == datetime.date(1953, 1, 1))
assert (tm._string_to_date_interval("Cal53")['end_date'] == datetime.date(1953, 12, 31))
assert (tm._string_to_date_interval("2010")['start_date'] == datetime.date(2010, 1, 1))
assert (tm._string_to_date_interval("2010")['end_date'] == datetime.date(2010, 12, 31))
assert (tm._string_to_date_interval("3Q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3Q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2h2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2h2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("3q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2H2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2H2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("Mar2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("Mar2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("March2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("March2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("5Q20") == "Invalid Quarter")
assert (tm._string_to_date_interval("HH2021") == "Invalid num")
assert (tm._string_to_date_interval("3H2021") == "Invalid Half Year")
assert (tm._string_to_date_interval("Cal2a") == "Invalid year")
assert (tm._string_to_date_interval("Marc201") == "Invalid date code")
assert (tm._string_to_date_interval("M1a2021") == "Invalid date code")
assert (tm._string_to_date_interval("Marcha2021") == "Invalid date code")
assert (tm._string_to_date_interval("I20") == "Invalid month")
assert (tm._string_to_date_interval("20") == "Unknown date code")
def test_implied_vol_commod():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_implied_volatility)
mock = Index('MA001', AssetClass.Commod, 'Option NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.implied_volatility(mock,
tenor='F21-H21')
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
def test_fair_price():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
mock2 = Swap('MA002', AssetClass.Commod, 'Swap Oil')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock,
tenor='F21')
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.fair_price(mock,
tenor=None)
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price_swap)
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock2)
assert_series_equal(pd.Series([2.880],
index=[pd.Timestamp('2019-01-02')],
name='fairPrice'),
pd.Series(actual),
)
replace.restore()
def test_weighted_average_valuation_curve_for_calendar_strip():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21-H21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='Invalid',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F20-I20',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='3H20',
query_type=QueryType.PRICE,
measure_field='fairPrice'
)
replace.restore()
def test_fundamental_metrics():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
period = '1y'
direction = tm.FundamentalMetricPeriodDirection.FORWARD
actual = tm.dividend_yield(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.dividend_yield(..., period, direction, real_time=True)
actual = tm.earnings_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share(..., period, direction, real_time=True)
actual = tm.earnings_per_share_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share_positive(..., period, direction, real_time=True)
actual = tm.net_debt_to_ebitda(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.net_debt_to_ebitda(..., period, direction, real_time=True)
actual = tm.price_to_book(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_book(..., period, direction, real_time=True)
actual = tm.price_to_cash(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_cash(..., period, direction, real_time=True)
actual = tm.price_to_earnings(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings(..., period, direction, real_time=True)
actual = tm.price_to_earnings_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings_positive(..., period, direction, real_time=True)
actual = tm.price_to_sales(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
import pandas as pd
import numpy as np
import math
import os
import geopandas as gpd
import folium
import requests
import json
import datetime
from datetime import date, timedelta
from abc import ABC, abstractmethod
from pathlib import Path
from CovidFoliumMap import CovidFoliumMap, ensure_path_exists, download_JSON_file
""" This classes generate different folium maps based on the data of the RKI using access to the
RKI Covid-19 API.
The class inherits from the CovidFoliumMap class. Here are some usefull links:
- Geodata sources for Germany
From the Bundesamt für Kartographie und Geodäsie:
License plates (wfs_kfz250): https://gdz.bkg.bund.de/index.php/default/open-data/wfs-kfz-kennzeichen-1-250-000-wfs-kfz250.html
Counties & population (wfs_vg250-ew): https://gdz.bkg.bund.de/index.php/default/open-data/wfs-verwaltungsgebiete-1-250-000-mit-einwohnerzahlen-stand-31-12-wfs-vg250-ew.html
From OpenDataLab
Good county, city, village maps with optional other meta information
Portal: http://opendatalab.de/projects/geojson-utilities/
a download from there creates 'landkreise_simplify0.geojson'. The 0 refers to highest resolution (1:250000)
GitHub: https://github.com/opendatalab-de/simple-geodata-selector
- RKI Covid-19 API
Great REST API to retrieve the Covid-19 data of the RKI
https://api.corona-zahlen.org/docs/endpoints/districts.html#districts-history-recovered
BUT:
The RKI divides Berlin in districts and that doesn't match regular geoJSON files. Therefore you should use the RKI geoJSON for
German counties/cities: https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/917fc37a709542548cc3be077a786c17_0/explore to
download 'RKI_Corona_Landkreise.geojson'
"""
class CovidFoliumMapDEcounties(CovidFoliumMap):
"""
This class exposes an interface for building choropleth maps that display Covid-19 data attributes for counties and cities in Germany.
"""
def __init__(self, dataDirectory = '../data'):
""" Constructor
Args:
dataDirectory (str, optional): The data directory to be used for cached data. Defaults to '../data'.
"""
# init members
self.__dataDirectory = dataDirectory + '/'
self.__dfGeo = None
self.__dfData = None
self.__defaultMapOptions = CovidFoliumMap.mapOptions(mapDate=date.today(),
mapAlias = 'MapDEcounty',
mapLocation = [51.3, 10.5],
mapZoom = 6,
bins = [5, 25, 50, 100, 200, 400, 800, 1200, 1600, 2600],
mapAttribute = 'Robert Koch-Institut (RKI), dl-de/by-2-0, CMBT 2022',
tooltipAttributes = ['GeoName',
'Cases',
'Deaths',
'WeeklyCases',
'WeeklyDeaths',
'DailyCases',
'DailyDeaths',
'DailyRecovered',
'Incidence7DayPer100Kpopulation'])
# ensure that the data directory exists, meaning to create it if it is not available
self.__dataDirectory = ensure_path_exists(dataDirectory)
# check if it really exists
if self.__dataDirectory != '':
# get the geo JSON data frame
self.__dfGeo = self.__get_geo_data()
# get the covid data for all counties/cities in the geo dataframe
if self.get_geo_df is not None:
self.__dfData = self.__get_covid_data(self.__dfGeo)
# init base class
super().__init__(self.__dataDirectory)
def __get_geo_data(self):
""" Downloads the JSON file from the RKI server if necessary and opens it to return a geoPandas dataframe. The function throws an
exception in case of an error
Returns:
geo dataframe: the geo dataframe of the German counties and cities or None if it can't load the file
"""
# init return
geoDf = None
# the filename of the geoJSON that is used
targetFilename = self.__dataDirectory + '/' + 'RKI_Corona_Landkreise.geojson'
# check if it exist already
if not os.path.exists(targetFilename):
# download the file
print('Downloading data (RKI_Corona_Landkreise.geojson), that might take some time...')
endpoint = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_Landkreisdaten/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json'
# the manual download link is
# 'https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/917fc37a709542548cc3be077a786c17_0/explore?location=51.282342%2C10.714458%2C6.71'
try:
# try to download the file
download_JSON_file(endpoint, targetFilename)
print('Download finished.')
except Exception as e:
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
# now the file should exist
if os.path.exists(targetFilename):
# load the file
geoDf = gpd.read_file(targetFilename)
#print(geoDf.head())
# finally return the geo df
return geoDf
def __get_covid_data(self, geoDf):
""" Downloads the covid-19 data from the RKI servers if necessary, caches them and opens a final csv to return a Pandas dataframe.
Returns:
covid dataframe: the covid data for the German counties and cities or None if it can't load the file
"""
# init the result
df = None
# get the date
today = date.today()
# the prefix of the CSV file is Y-m-d
preFix = today.strftime('%Y-%m-%d') + "-RKIcounty"
# the target filename of the csv to be downloaded
targetFilename = self.__dataDirectory + '/' + preFix + '-db.csv'
# check if it exist already
if os.path.exists(targetFilename):
print('using existing file: ' + targetFilename)
# read the file
df = pd.read_csv(targetFilename)
else:
print('Downloading data (yy-mm-dd--RKIcounty-db.csv), that might take some time...')
# build a result df
dfs = []
for id in geoDf['RS']:
try:
# get the data for the county
df = self.__get_county_data_from_web(id)
# add it to the list
dfs.append(df)
except Exception:
msg = 'Error getting the data for ' + str(id) + '!'
print(msg)
try:
# finally concatenate all dfs together
df = pd.concat(dfs)
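# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of how the class above is meant to be driven, assuming this module is on the
# import path and that network access (or previously cached files in the data directory) is
# available. Only the constructor is exercised here, because the map-building methods live in the
# CovidFoliumMap base class, which is not shown in this file.
if __name__ == '__main__':
    # building the object triggers download/caching of the geoJSON and the daily RKI CSV
    demo = CovidFoliumMapDEcounties(dataDirectory='../data')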
import os
from functools import lru_cache
import time
import requests
from multiprocessing import Pool
from datetime import datetime, timedelta
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
INIT_COUNTRY = os.environ.get('COUNTRY', 'Canada')
LAT_RANGES = {
'Canada': [40, 83],
'US': [25, 55]
}
LON_RANGES = {
'Canada': [-125, -54],
'US': [-120, -73]
}
PROVINCE_NAME = {
'Canada': 'Province',
'US': 'State'
}
def get_geojson_canada():
response = requests.get('https://raw.githubusercontent.com/codeforamerica/click_that_hood/master/public/data/canada.geojson')
geojson = response.json()
for i, gj in enumerate(geojson['features']):
if 'Yukon' in gj['properties']['name']:
gj['properties']['name'] = 'Yukon'
geojson['features'][i] = gj
return geojson
def get_geojson_us():
response = requests.get('https://raw.githubusercontent.com/codeforamerica/click_that_hood/master/public/data/united-states.geojson')
geojson = response.json()
return geojson
GEO_FNS = {
'Canada': get_geojson_canada,
'US': get_geojson_us,
}
@lru_cache(1)
def get_geojson(country):
return GEO_FNS[country]()
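# Illustration (added, not part of the original app): boundaries are fetched through the cached
# dispatcher above. Note that lru_cache(1) keeps only the most recently requested country, so
# alternating between 'Canada' and 'US' re-downloads each time.
def _example_boundary_names(country=INIT_COUNTRY):
    geojson = get_geojson(country)
    # region names as they appear in the geoJSON properties (the same field patched for Yukon above)
    return [feature['properties']['name'] for feature in geojson['features']]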
# TODO: finish global data function
# import pytz
# from tzwhere import tzwhere
# data = pd.read_csv(
# 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/'
# 'csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
# )
# data = data.set_index(['Country/Region', 'Province/State'])
# def get_tz(x):
# try:
# return pytz.timezone(tzwhere.tzNameAt(*x.values, forceTZ=True))
# except Exception as e:
# print(x, x.index)
# raise e
# coords = data[['Lat', 'Long']]
# tzwhere = tzwhere.tzwhere(forceTZ=True)
# coords['tz'] = coords.apply(get_tz, axis=1)
# data = data.drop(columns=['Lat', 'Long'])
# data = data.transpose()
# data['date_index'] = pd.to_datetime(data.index)
# data = data.set_index('date_index')
def get_data_canada():
data = pd.read_csv('https://health-infobase.canada.ca/src/data/covidLive/covid19.csv')
data = data[['prname', 'date', 'numdeaths', 'numtotal', 'numtested']]
data['date_index'] = pd.to_datetime(data.date, format='%d-%m-%Y')
data.date = data.date_index.dt.strftime('%Y-%m-%d')
data.set_index('date_index', inplace=True)
data.columns = ['Province', 'Date', 'Total Deaths', 'Total Cases', 'Total Tests']
data.sort_index(inplace=True)
provinces_totals = (
data.groupby('Province')
.agg({'Total Cases': max})
.reset_index()
.sort_values('Total Cases', ascending=False)
)
return data, provinces_totals
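# Illustration (added): a typical call pattern for the loader above (network access assumed).
# provinces_totals is already sorted by 'Total Cases', so the head gives the hardest-hit provinces.
def _example_top_provinces(n=5):
    data, provinces_totals = get_data_canada()
    return provinces_totals.head(n)['Province'].tolist()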
def get_data_us():
data = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv')
from PySDDP.dessem.script.templates.dadger import DadgerTemplate
import pandas as pd
import os
from typing import IO
COMENTARIO = '&'
class Dadger(DadgerTemplate):
"""
Class that contains all the elements common to any version of the Dessem Entdados file.
This class is meant to provide duck typing for the Dessem class and to add a level of specification
within the factory. It must also pass on the responsibility for implementing the read and write
methods.
"""
def __init__(self):
super().__init__()
self.comentarios = list()
self.tm = dict()
self.sist = dict()
self.ree = dict()
self.uh = dict()
self.tviag = dict()
self.ut = dict()
self.usie = dict()
self.dp = dict()
self.de = dict()
self.cd = dict()
self.ri = dict()
self.ia = dict()
self.rd = dict()
self.rivar = dict()
self.it = dict()
self.gp = dict()
self.ni = dict()
self.ve = dict()
self.ci_ce = dict()
self.re = dict()
self.lu = dict()
self.fh = dict()
self.ft = dict()
self.fi = dict()
self.fe = dict()
self.fr = dict()
self.fc = dict()
self.ac = dict()
self.da = dict()
self.fp = dict()
self.ez = dict()
self.ag = dict()
self.mh = dict()
self.mt = dict()
self.tx = dict()
self.pq = dict()
self.secr = dict()
self.cr = dict()
self.r11 = dict()
self.vr = dict()
self.pd = dict()
self.vm = dict()
self.df = dict()
self.me = dict()
self.meta_cjsist = dict()
self.meta_sist = dict()
self.meta_usit = dict()
self.sh = dict()
self.tf = dict()
self.rs = dict()
self.sp = dict()
self.ps = dict()
self.pp = dict()
def ler(self, file_name: str) -> None:
self.entdados = list()
# lists for the TM dictionary
self.tm['mne'] = list()
self.tm['dd'] = list()
self.tm['hr'] = list()
self.tm['mh'] = list()
self.tm['durac'] = list()
self.tm['rede'] = list()
self.tm['patamar'] = list()
# lists for the SIST dictionary
self.sist['mne'] = list()
self.sist['num'] = list()
self.sist['mne_iden'] = list()
self.sist['flag'] = list()
self.sist['nome'] = list()
# lists for the REE dictionary
self.ree['mne'] = list()
self.ree['num_ree'] = list()
self.ree['num_sub'] = list()
self.ree['nome'] = list()
# lists for the UH dictionary
self.uh['mne'] = list()
self.uh['ind'] = list()
self.uh['nome'] = list()
self.uh['ss'] = list()
self.uh['vinic'] = list()
self.uh['evap'] = list()
self.uh['di'] = list()
self.uh['hi'] = list()
self.uh['m'] = list()
self.uh['vmor'] = list()
self.uh['prod'] = list()
self.uh['rest'] = list()
# lists for the TVIAG dictionary
self.tviag['mne'] = list()
self.tviag['mont'] = list()
self.tviag['jus'] = list()
self.tviag['tp'] = list()
self.tviag['hr'] = list()
self.tviag['tpTviag'] = list()
# lists for the UT dictionary
self.ut['mne'] = list()
self.ut['num'] = list()
self.ut['nome'] = list()
self.ut['ss'] = list()
self.ut['flag'] = list()
self.ut['di'] = list()
self.ut['hi'] = list()
self.ut['mi'] = list()
self.ut['df'] = list()
self.ut['hf'] = list()
self.ut['mf'] = list()
self.ut['rest'] = list()
self.ut['gmin'] = list()
self.ut['gmax'] = list()
self.ut['g_anterior'] = list()
# lists for the USIE dictionary
self.usie['mne'] = list()
self.usie['num'] = list()
self.usie['ss'] = list()
self.usie['nome'] = list()
self.usie['mont'] = list()
self.usie['jus'] = list()
self.usie['qmin'] = list()
self.usie['qmax'] = list()
self.usie['taxa_consumo'] = list()
# lists for the DP dictionary
self.dp['mne'] = list()
self.dp['ss'] = list()
self.dp['di'] = list()
self.dp['hi'] = list()
self.dp['mi'] = list()
self.dp['df'] = list()
self.dp['hf'] = list()
self.dp['mf'] = list()
self.dp['demanda'] = list()
# lists for the DE dictionary
self.de['mne'] = list()
self.de['nde'] = list()
self.de['di'] = list()
self.de['hi'] = list()
self.de['mi'] = list()
self.de['df'] = list()
self.de['hf'] = list()
self.de['mf'] = list()
self.de['demanda'] = list()
self.de['justific'] = list()
# lists for the CD dictionary
self.cd['mne'] = list()
self.cd['is'] = list()
self.cd['cd'] = list()
self.cd['di'] = list()
self.cd['hi'] = list()
self.cd['mi'] = list()
self.cd['df'] = list()
self.cd['hf'] = list()
self.cd['mf'] = list()
self.cd['custo'] = list()
self.cd['limsup'] = list()
# lists for the RI dictionary
self.ri['mne'] = list()
self.ri['di'] = list()
self.ri['hi'] = list()
self.ri['mi'] = list()
self.ri['df'] = list()
self.ri['hf'] = list()
self.ri['mf'] = list()
self.ri['gh50min'] = list()
self.ri['gh50max'] = list()
self.ri['gh60min'] = list()
self.ri['gh60max'] = list()
self.ri['ande'] = list()
# lists for the IA dictionary
self.ia['mne'] = list()
self.ia['ss1'] = list()
self.ia['ss2'] = list()
self.ia['di'] = list()
self.ia['hi'] = list()
self.ia['mi'] = list()
self.ia['df'] = list()
self.ia['hf'] = list()
self.ia['mf'] = list()
self.ia['ss1_ss2'] = list()
self.ia['ss2_ss1'] = list()
# lists for the RD dictionary
self.rd['mne'] = list()
self.rd['flag_fol'] = list()
self.rd['ncirc'] = list()
self.rd['dbar'] = list()
self.rd['lim'] = list()
self.rd['dlin'] = list()
self.rd['perd'] = list()
self.rd['formato'] = list()
# lists for the RIVAR dictionary
self.rivar['mne'] = list()
self.rivar['num'] = list()
self.rivar['ss'] = list()
self.rivar['cod'] = list()
self.rivar['penalidade'] = list()
# lists for the IT dictionary
self.it['mne'] = list()
self.it['num'] = list()
self.it['coef'] = list()
# lists for the GP dictionary
self.gp['mne'] = list()
self.gp['tol_conv'] = list()
self.gp['tol_prob'] = list()
# lists for the NI dictionary
self.ni['mne'] = list()
self.ni['flag'] = list()
self.ni['nmax'] = list()
# lists for the VE dictionary
self.ve['mne'] = list()
self.ve['ind'] = list()
self.ve['di'] = list()
self.ve['hi'] = list()
self.ve['mi'] = list()
self.ve['df'] = list()
self.ve['hf'] = list()
self.ve['mf'] = list()
self.ve['vol'] = list()
# lists for the CI/CE dictionary
self.ci_ce['mne'] = list()
self.ci_ce['num'] = list()
self.ci_ce['nome'] = list()
self.ci_ce['ss_busf'] = list()
self.ci_ce['flag'] = list()
self.ci_ce['di'] = list()
self.ci_ce['hi'] = list()
self.ci_ce['mi'] = list()
self.ci_ce['df'] = list()
self.ci_ce['hf'] = list()
self.ci_ce['mf'] = list()
self.ci_ce['unid'] = list()
self.ci_ce['linf'] = list()
self.ci_ce['lsup'] = list()
self.ci_ce['custo'] = list()
self.ci_ce['energia'] = list()
# lists for the RE dictionary
self.re['mne'] = list()
self.re['ind'] = list()
self.re['di'] = list()
self.re['hi'] = list()
self.re['mi'] = list()
self.re['df'] = list()
self.re['hf'] = list()
self.re['mf'] = list()
# lists for the LU dictionary
self.lu['mne'] = list()
self.lu['ind'] = list()
self.lu['di'] = list()
self.lu['hi'] = list()
self.lu['mi'] = list()
self.lu['df'] = list()
self.lu['hf'] = list()
self.lu['mf'] = list()
self.lu['linf'] = list()
self.lu['lsup'] = list()
# lists for the FH dictionary
self.fh['mne'] = list()
self.fh['ind'] = list()
self.fh['di'] = list()
self.fh['hi'] = list()
self.fh['mi'] = list()
self.fh['df'] = list()
self.fh['hf'] = list()
self.fh['mf'] = list()
self.fh['ush'] = list()
self.fh['unh'] = list()
self.fh['fator'] = list()
# lists for the FT dictionary
self.ft['mne'] = list()
self.ft['ind'] = list()
self.ft['di'] = list()
self.ft['hi'] = list()
self.ft['mi'] = list()
self.ft['df'] = list()
self.ft['hf'] = list()
self.ft['mf'] = list()
self.ft['ust'] = list()
self.ft['fator'] = list()
# lists for the FI dictionary
self.fi['mne'] = list()
self.fi['ind'] = list()
self.fi['di'] = list()
self.fi['hi'] = list()
self.fi['mi'] = list()
self.fi['df'] = list()
self.fi['hf'] = list()
self.fi['mf'] = list()
self.fi['ss1'] = list()
self.fi['ss2'] = list()
self.fi['fator'] = list()
# lists for the FE dictionary
self.fe['mne'] = list()
self.fe['ind'] = list()
self.fe['di'] = list()
self.fe['hi'] = list()
self.fe['mi'] = list()
self.fe['df'] = list()
self.fe['hf'] = list()
self.fe['mf'] = list()
self.fe['num_contrato'] = list()
self.fe['fator'] = list()
# lists for the FR dictionary
self.fr['mne'] = list()
self.fr['ind'] = list()
self.fr['di'] = list()
self.fr['hi'] = list()
self.fr['mi'] = list()
self.fr['df'] = list()
self.fr['hf'] = list()
self.fr['mf'] = list()
self.fr['useol'] = list()
self.fr['fator'] = list()
# lists for the FC dictionary
self.fc['mne'] = list()
self.fc['ind'] = list()
self.fc['di'] = list()
self.fc['hi'] = list()
self.fc['mi'] = list()
self.fc['df'] = list()
self.fc['hf'] = list()
self.fc['mf'] = list()
self.fc['demanda'] = list()
self.fc['fator'] = list()
# lists for the AC dictionary
self.ac['mne'] = list()
self.ac['usi'] = list()
self.ac['mneumonico'] = list()
self.ac['ind'] = list()
self.ac['valor'] = list()
# lists for the DA dictionary
self.da['mne'] = list()
self.da['ind'] = list()
self.da['di'] = list()
self.da['hi'] = list()
self.da['mi'] = list()
self.da['df'] = list()
self.da['hf'] = list()
self.da['mf'] = list()
self.da['taxa'] = list()
self.da['obs'] = list()
# lists for the FP dictionary
self.fp['mne'] = list()
self.fp['usi'] = list()
self.fp['f'] = list()
self.fp['nptQ'] = list()
self.fp['nptV'] = list()
self.fp['concavidade'] = list()
self.fp['min_quadraticos'] = list()
self.fp['deltaV'] = list()
self.fp['tr'] = list()
# lists for the EZ dictionary
self.ez['mne'] = list()
self.ez['usi'] = list()
self.ez['perc_vol'] = list()
# lists for the AG dictionary
self.ag['mne'] = list()
self.ag['num_estagios'] = list()
# lists for the MH dictionary
self.mh['mne'] = list()
self.mh['num'] = list()
self.mh['gr'] = list()
self.mh['id'] = list()
self.mh['di'] = list()
self.mh['hi'] = list()
self.mh['mi'] = list()
self.mh['df'] = list()
self.mh['hf'] = list()
self.mh['mf'] = list()
self.mh['f'] = list()
# lists for the MT dictionary
self.mt['mne'] = list()
self.mt['ute'] = list()
self.mt['ug'] = list()
self.mt['di'] = list()
self.mt['hi'] = list()
self.mt['mi'] = list()
self.mt['df'] = list()
self.mt['hf'] = list()
self.mt['mf'] = list()
self.mt['f'] = list()
# lists for the TX dictionary
self.tx['mne'] = list()
self.tx['taxa_fcf'] = list()
# lists for the PQ dictionary
self.pq['mne'] = list()
self.pq['ind'] = list()
self.pq['nome'] = list()
self.pq['ss/b'] = list()
self.pq['di'] = list()
self.pq['hi'] = list()
self.pq['mi'] = list()
self.pq['df'] = list()
self.pq['hf'] = list()
self.pq['mf'] = list()
self.pq['geracao'] = list()
# lists for the SECR dictionary
self.secr['mne'] = list()
self.secr['num'] = list()
self.secr['nome'] = list()
self.secr['usi_1'] = list()
self.secr['fator_1'] = list()
self.secr['usi_2'] = list()
self.secr['fator_2'] = list()
self.secr['usi_3'] = list()
self.secr['fator_3'] = list()
self.secr['usi_4'] = list()
self.secr['fator_4'] = list()
self.secr['usi_5'] = list()
self.secr['fator_5'] = list()
# lists for the CR dictionary
self.cr['mne'] = list()
self.cr['num'] = list()
self.cr['nome'] = list()
self.cr['gr'] = list()
self.cr['A0'] = list()
self.cr['A1'] = list()
self.cr['A2'] = list()
self.cr['A3'] = list()
self.cr['A4'] = list()
self.cr['A5'] = list()
self.cr['A6'] = list()
# lists for the R11 dictionary
self.r11['mne'] = list()
self.r11['di'] = list()
self.r11['hi'] = list()
self.r11['mi'] = list()
self.r11['df'] = list()
self.r11['hf'] = list()
self.r11['mf'] = list()
self.r11['cotaIni'] = list()
self.r11['varhora'] = list()
self.r11['vardia'] = list()
self.r11['coef'] = list()
# lists for the VR dictionary
self.vr['mne'] = list()
self.vr['dia'] = list()
self.vr['mneumo_verao'] = list()
# lists for the PD dictionary
self.pd['mne'] = list()
self.pd['tol_perc'] = list()
self.pd['tol_MW'] = list()
# lists for the VM dictionary
self.vm['mne'] = list()
self.vm['ind'] = list()
self.vm['di'] = list()
self.vm['hi'] = list()
self.vm['mi'] = list()
self.vm['df'] = list()
self.vm['hf'] = list()
self.vm['mf'] = list()
self.vm['taxa_enchimento'] = list()
# lists for the DF dictionary
self.df['mne'] = list()
self.df['ind'] = list()
self.df['di'] = list()
self.df['hi'] = list()
self.df['mi'] = list()
self.df['df'] = list()
self.df['hf'] = list()
self.df['mf'] = list()
self.df['taxa_descarga'] = list()
# lists for the ME dictionary
self.me['mne'] = list()
self.me['ind'] = list()
self.me['di'] = list()
self.me['hi'] = list()
self.me['mi'] = list()
self.me['df'] = list()
self.me['hf'] = list()
self.me['mf'] = list()
self.me['fator'] = list()
# lists for the META CJSIST dictionary
self.meta_cjsist['mneumo'] = list()
self.meta_cjsist['ind'] = list()
self.meta_cjsist['nome'] = list()
# lists for the META SIST dictionary
self.meta_sist['mne'] = list()
self.meta_sist['ind'] = list()
self.meta_sist['tp'] = list()
self.meta_sist['num'] = list()
self.meta_sist['meta'] = list()
self.meta_sist['tol_MW'] = list()
self.meta_sist['tol_perc'] = list()
# lists for the META USIT dictionary
self.meta_usit['mne'] = list()
self.meta_usit['ind'] = list()
self.meta_usit['tp'] = list()
self.meta_usit['num'] = list()
self.meta_usit['meta'] = list()
self.meta_usit['tol_MW'] = list()
self.meta_usit['tol_perc'] = list()
# lists for the SH dictionary
self.sh['mne'] = list()
self.sh['flag_simul'] = list()
self.sh['flag_pl'] = list()
self.sh['num_min'] = list()
self.sh['num_max'] = list()
self.sh['flag_quebra'] = list()
self.sh['ind_1'] = list()
self.sh['ind_2'] = list()
self.sh['ind_3'] = list()
self.sh['ind_4'] = list()
self.sh['ind_5'] = list()
# lists for the TF dictionary
self.tf['mne'] = list()
self.tf['custo'] = list()
# lists for the RS dictionary
self.rs['mne'] = list()
self.rs['cod'] = list()
self.rs['ind'] = list()
self.rs['subs'] = list()
self.rs['tp'] = list()
self.rs['comentario'] = list()
# lists for the SP dictionary
self.sp['mne'] = list()
self.sp['flag'] = list()
# lists for the PS dictionary
self.ps['mne'] = list()
self.ps['flag'] = list()
# lists for the PP dictionary
self.pp['mne'] = list()
self.pp['flag'] = list()
self.pp['iteracoes'] = list()
self.pp['num'] = list()
self.pp['tp'] = list()
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
while continua:
self.next_line(f)
linha = self.linha
if linha[0] == COMENTARIO:
self.comentarios.append(linha)
self.entdados.append(linha)
continue
mne = linha[:6].strip().lower()
mne_sigla = linha[:3].strip().lower()
mneumo = linha[:13].strip().lower()
self.entdados.append(linha[:6])
# Read the data according to the corresponding mnemonic
if mne_sigla == 'tm':
self.tm['mne'].append(self.linha[:2])
self.tm['dd'].append(self.linha[4:6])
self.tm['hr'].append(self.linha[9:11])
self.tm['mh'].append(self.linha[14:15])
self.tm['durac'].append(self.linha[19:24])
self.tm['rede'].append(self.linha[29:30])
self.tm['patamar'].append(self.linha[33:39])
continue
if mne == 'sist':
self.sist['mne'].append(self.linha[:6])
self.sist['num'].append(self.linha[7:9])
self.sist['mne_iden'].append(self.linha[10:12])
self.sist['flag'].append(self.linha[13:15])
self.sist['nome'].append(self.linha[16:26])
continue
if mne == 'ree':
self.ree['mne'].append(self.linha[:3])
self.ree['num_ree'].append(self.linha[6:8])
self.ree['num_sub'].append(self.linha[9:11])
self.ree['nome'].append(self.linha[12:22])
continue
if mne_sigla == 'uh':
self.uh['mne'].append(self.linha[:2])
self.uh['ind'].append(self.linha[4:7])
self.uh['nome'].append(self.linha[9:21])
self.uh['ss'].append(self.linha[24:26])
self.uh['vinic'].append(self.linha[29:39])
self.uh['evap'].append(self.linha[39:40])
self.uh['di'].append(self.linha[41:43])
self.uh['hi'].append(self.linha[44:46])
self.uh['m'].append(self.linha[47:48])
self.uh['vmor'].append(self.linha[49:59])
self.uh['prod'].append(self.linha[64:65])
self.uh['rest'].append(self.linha[69:70])
continue
if mne == 'tviag':
self.tviag['mne'].append(self.linha[:6])
self.tviag['mont'].append(self.linha[6:9])
self.tviag['jus'].append(self.linha[10:13])
self.tviag['tp'].append(self.linha[14:15])
self.tviag['hr'].append(self.linha[19:22])
self.tviag['tpTviag'].append(self.linha[24:25])
continue
if mne_sigla == 'ut':
self.ut['mne'].append(self.linha[:2])
self.ut['num'].append(self.linha[4:7])
self.ut['nome'].append(self.linha[9:21])
self.ut['ss'].append(self.linha[22:24])
self.ut['flag'].append(self.linha[25:26])
self.ut['di'].append(self.linha[27:29])
self.ut['hi'].append(self.linha[30:32])
self.ut['mi'].append(self.linha[33:34])
self.ut['df'].append(self.linha[35:37])
self.ut['hf'].append(self.linha[38:40])
self.ut['mf'].append(self.linha[41:42])
self.ut['rest'].append(self.linha[46:47])
self.ut['gmin'].append(self.linha[47:57])
self.ut['gmax'].append(self.linha[57:67])
self.ut['g_anterior'].append(self.linha[67:77])
continue
if mne == 'usie':
self.usie['mne'].append(self.linha[:4])
self.usie['num'].append(self.linha[5:8])
self.usie['ss'].append(self.linha[9:11])
self.usie['nome'].append(self.linha[14:26])
self.usie['mont'].append(self.linha[29:32])
self.usie['jus'].append(self.linha[34:37])
self.usie['qmin'].append(self.linha[39:49])
self.usie['qmax'].append(self.linha[49:59])
self.usie['taxa_consumo'].append(self.linha[59:69])
continue
if mne_sigla == 'dp':
self.dp['mne'].append(self.linha[:2])
self.dp['ss'].append(self.linha[4:6])
self.dp['di'].append(self.linha[8:10])
self.dp['hi'].append(self.linha[11:13])
self.dp['mi'].append(self.linha[14:15])
self.dp['df'].append(self.linha[16:18])
self.dp['hf'].append(self.linha[19:21])
self.dp['mf'].append(self.linha[22:23])
self.dp['demanda'].append(self.linha[24:34])
continue
if mne_sigla == 'de':
self.de['mne'].append(self.linha[:2])
self.de['nde'].append(self.linha[4:7])
self.de['di'].append(self.linha[8:10])
self.de['hi'].append(self.linha[11:13])
self.de['mi'].append(self.linha[14:15])
self.de['df'].append(self.linha[16:18])
self.de['hf'].append(self.linha[19:21])
self.de['mf'].append(self.linha[22:23])
self.de['demanda'].append(self.linha[24:34])
self.de['justific'].append(self.linha[35:45])
continue
if mne_sigla == 'cd':
self.cd['mne'].append(self.linha[:2])
self.cd['is'].append(self.linha[3:5])
self.cd['cd'].append(self.linha[6:8])
self.cd['di'].append(self.linha[9:11])
self.cd['hi'].append(self.linha[12:14])
self.cd['mi'].append(self.linha[15:16])
self.cd['df'].append(self.linha[17:19])
self.cd['hf'].append(self.linha[20:22])
self.cd['mf'].append(self.linha[23:24])
self.cd['custo'].append(self.linha[25:35])
self.cd['limsup'].append(self.linha[35:45])
continue
if mne_sigla == 'ri':
self.ri['mne'].append(self.linha[:2])
self.ri['di'].append(self.linha[8:10])
self.ri['hi'].append(self.linha[11:13])
self.ri['mi'].append(self.linha[14:15])
self.ri['df'].append(self.linha[16:18])
self.ri['hf'].append(self.linha[19:21])
self.ri['mf'].append(self.linha[22:23])
self.ri['gh50min'].append(self.linha[26:36])
self.ri['gh50max'].append(self.linha[36:46])
self.ri['gh60min'].append(self.linha[46:56])
self.ri['gh60max'].append(self.linha[56:66])
self.ri['ande'].append(self.linha[66:76])
continue
if mne_sigla == 'ia':
self.ia['mne'].append(self.linha[:2])
self.ia['ss1'].append(self.linha[4:6])
self.ia['ss2'].append(self.linha[9:11])
self.ia['di'].append(self.linha[13:15])
self.ia['hi'].append(self.linha[16:18])
self.ia['mi'].append(self.linha[19:20])
self.ia['df'].append(self.linha[21:23])
self.ia['hf'].append(self.linha[24:26])
self.ia['mf'].append(self.linha[27:28])
self.ia['ss1_ss2'].append(self.linha[29:39])
self.ia['ss2_ss1'].append(self.linha[39:49])
continue
if mne_sigla == 'rd':
self.rd['mne'].append(self.linha[:2])
self.rd['flag_fol'].append(self.linha[4:5])
self.rd['ncirc'].append(self.linha[8:12])
self.rd['dbar'].append(self.linha[14:15])
self.rd['lim'].append(self.linha[16:17])
self.rd['dlin'].append(self.linha[18:19])
self.rd['perd'].append(self.linha[20:21])
self.rd['formato'].append(self.linha[22:23])
continue
if mne == 'rivar':
self.rivar['mne'].append(self.linha[:5])
self.rivar['num'].append(self.linha[7:10])
self.rivar['ss'].append(self.linha[11:14])
self.rivar['cod'].append(self.linha[15:17])
self.rivar['penalidade'].append(self.linha[19:29])
continue
if mne_sigla == 'it':
self.it['mne'].append(self.linha[:2])
self.it['num'].append(self.linha[4:6])
self.it['coef'].append(self.linha[9:84])
continue
if mne_sigla == 'gp':
self.gp['mne'].append(self.linha[:2])
self.gp['tol_conv'].append(self.linha[4:14])
self.gp['tol_prob'].append(self.linha[15:25])
continue
if mne_sigla == 'ni':
self.ni['mne'].append(self.linha[:2])
self.ni['flag'].append(self.linha[4:5])
self.ni['nmax'].append(self.linha[9:12])
continue
if mne_sigla == 've':
self.ve['mne'].append(self.linha[:2])
self.ve['ind'].append(self.linha[4:7])
self.ve['di'].append(self.linha[8:10])
self.ve['hi'].append(self.linha[11:13])
self.ve['mi'].append(self.linha[14:15])
self.ve['df'].append(self.linha[16:18])
self.ve['hf'].append(self.linha[19:21])
self.ve['mf'].append(self.linha[22:23])
self.ve['vol'].append(self.linha[24:34])
continue
if mne_sigla == 'ci' or mne_sigla == 'ce':
self.ci_ce['mne'].append(self.linha[:2])
self.ci_ce['num'].append(self.linha[3:6])
self.ci_ce['nome'].append(self.linha[7:17])
self.ci_ce['ss_busf'].append(self.linha[18:23])
self.ci_ce['flag'].append(self.linha[23:24])
self.ci_ce['di'].append(self.linha[25:27])
self.ci_ce['hi'].append(self.linha[28:30])
self.ci_ce['mi'].append(self.linha[31:32])
self.ci_ce['df'].append(self.linha[33:35])
self.ci_ce['hf'].append(self.linha[36:38])
self.ci_ce['mf'].append(self.linha[39:40])
self.ci_ce['unid'].append(self.linha[41:42])
self.ci_ce['linf'].append(self.linha[43:53])
self.ci_ce['lsup'].append(self.linha[53:63])
self.ci_ce['custo'].append(self.linha[63:73])
self.ci_ce['energia'].append(self.linha[73:83])
continue
if mne_sigla == 're':
self.re['mne'].append(self.linha[:2])
self.re['ind'].append(self.linha[4:7])
self.re['di'].append(self.linha[9:11])
self.re['hi'].append(self.linha[12:14])
self.re['mi'].append(self.linha[15:16])
self.re['df'].append(self.linha[17:19])
self.re['hf'].append(self.linha[20:22])
self.re['mf'].append(self.linha[23:24])
continue
if mne_sigla == 'lu':
self.lu['mne'].append(self.linha[:2])
self.lu['ind'].append(self.linha[4:7])
self.lu['di'].append(self.linha[8:10])
self.lu['hi'].append(self.linha[11:13])
self.lu['mi'].append(self.linha[14:15])
self.lu['df'].append(self.linha[16:18])
self.lu['hf'].append(self.linha[19:21])
self.lu['mf'].append(self.linha[22:23])
self.lu['linf'].append(self.linha[24:34])
self.lu['lsup'].append(self.linha[34:44])
continue
if mne_sigla == 'fh':
self.fh['mne'].append(self.linha[:2])
self.fh['ind'].append(self.linha[4:7])
self.fh['di'].append(self.linha[8:10])
self.fh['hi'].append(self.linha[11:13])
self.fh['mi'].append(self.linha[14:15])
self.fh['df'].append(self.linha[16:18])
self.fh['hf'].append(self.linha[19:21])
self.fh['mf'].append(self.linha[22:23])
self.fh['ush'].append(self.linha[24:27])
self.fh['unh'].append(self.linha[27:29])
self.fh['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'ft':
self.ft['mne'].append(self.linha[:2])
self.ft['ind'].append(self.linha[4:7])
self.ft['di'].append(self.linha[8:10])
self.ft['hi'].append(self.linha[11:13])
self.ft['mi'].append(self.linha[14:15])
self.ft['df'].append(self.linha[16:18])
self.ft['hf'].append(self.linha[19:21])
self.ft['mf'].append(self.linha[22:23])
self.ft['ust'].append(self.linha[24:27])
self.ft['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fi':
self.fi['mne'].append(self.linha[:2])
self.fi['ind'].append(self.linha[4:7])
self.fi['di'].append(self.linha[8:10])
self.fi['hi'].append(self.linha[11:13])
self.fi['mi'].append(self.linha[14:15])
self.fi['df'].append(self.linha[16:18])
self.fi['hf'].append(self.linha[19:21])
self.fi['mf'].append(self.linha[22:23])
self.fi['ss1'].append(self.linha[24:26])
self.fi['ss2'].append(self.linha[29:31])
self.fi['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fe':
self.fe['mne'].append(self.linha[:2])
self.fe['ind'].append(self.linha[4:7])
self.fe['di'].append(self.linha[8:10])
self.fe['hi'].append(self.linha[11:13])
self.fe['mi'].append(self.linha[14:15])
self.fe['df'].append(self.linha[16:18])
self.fe['hf'].append(self.linha[19:21])
self.fe['mf'].append(self.linha[22:23])
self.fe['num_contrato'].append(self.linha[24:27])
self.fe['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fr':
self.fr['mne'].append(self.linha[:2])
self.fr['ind'].append(self.linha[4:9])
self.fr['di'].append(self.linha[10:12])
self.fr['hi'].append(self.linha[13:15])
self.fr['mi'].append(self.linha[16:17])
self.fr['df'].append(self.linha[18:20])
self.fr['hf'].append(self.linha[21:23])
self.fr['mf'].append(self.linha[24:25])
self.fr['useol'].append(self.linha[26:31])
self.fr['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'fc':
self.fc['mne'].append(self.linha[:2])
self.fc['ind'].append(self.linha[4:7])
self.fc['di'].append(self.linha[10:12])
self.fc['hi'].append(self.linha[13:15])
self.fc['mi'].append(self.linha[16:17])
self.fc['df'].append(self.linha[18:20])
self.fc['hf'].append(self.linha[21:23])
self.fc['mf'].append(self.linha[24:25])
self.fc['demanda'].append(self.linha[26:29])
self.fc['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'ac':
self.ac['mne'].append(self.linha[:2])
self.ac['usi'].append(self.linha[4:7])
self.ac['mneumonico'].append(self.linha[9:15])
self.ac['ind'].append(self.linha[15:19])
self.ac['valor'].append(self.linha[19:])
continue
if mne_sigla == 'da':
self.da['mne'].append(self.linha[:2])
self.da['ind'].append(self.linha[4:7])
self.da['di'].append(self.linha[8:10])
self.da['hi'].append(self.linha[11:13])
self.da['mi'].append(self.linha[14:15])
self.da['df'].append(self.linha[16:18])
self.da['hf'].append(self.linha[19:21])
self.da['mf'].append(self.linha[22:23])
self.da['taxa'].append(self.linha[24:34])
self.da['obs'].append(self.linha[35:47])
continue
if mne_sigla == 'fp':
self.fp['mne'].append(self.linha[:2])
self.fp['usi'].append(self.linha[3:6])
self.fp['f'].append(self.linha[7:8])
self.fp['nptQ'].append(self.linha[10:13])
self.fp['nptV'].append(self.linha[15:18])
self.fp['concavidade'].append(self.linha[20:21])
self.fp['min_quadraticos'].append(self.linha[24:25])
self.fp['deltaV'].append(self.linha[29:39])
self.fp['tr'].append(self.linha[39:49])
continue
if mne_sigla == 'ez':
self.ez['mne'].append(self.linha[:2])
self.ez['usi'].append(self.linha[4:7])
self.ez['perc_vol'].append(self.linha[9:14])
continue
if mne_sigla == 'ag':
self.ag['mne'].append(self.linha[:2])
self.ag['num_estagios'].append(self.linha[3:6])
continue
if mne_sigla == 'mh':
self.mh['mne'].append(self.linha[:2])
self.mh['num'].append(self.linha[4:7])
self.mh['gr'].append(self.linha[9:11])
self.mh['id'].append(self.linha[12:14])
self.mh['di'].append(self.linha[14:16])
self.mh['hi'].append(self.linha[17:19])
self.mh['mi'].append(self.linha[20:21])
self.mh['df'].append(self.linha[22:24])
self.mh['hf'].append(self.linha[25:27])
self.mh['mf'].append(self.linha[28:29])
self.mh['f'].append(self.linha[30:31])
continue
if mne_sigla == 'mt':
self.mt['mne'].append(self.linha[:2])
self.mt['ute'].append(self.linha[4:7])
self.mt['ug'].append(self.linha[8:11])
self.mt['di'].append(self.linha[13:15])
self.mt['hi'].append(self.linha[16:18])
self.mt['mi'].append(self.linha[19:20])
self.mt['df'].append(self.linha[21:23])
self.mt['hf'].append(self.linha[24:26])
self.mt['mf'].append(self.linha[27:28])
self.mt['f'].append(self.linha[29:30])
continue
if mne_sigla == 'tx':
self.tx['mne'].append(self.linha[:2])
self.tx['taxa_fcf'].append(self.linha[4:14])
continue
if mne_sigla == 'pq':
self.pq['mne'].append(self.linha[:2])
self.pq['ind'].append(self.linha[4:7])
self.pq['nome'].append(self.linha[9:19])
self.pq['ss/b'].append(self.linha[19:24])
self.pq['di'].append(self.linha[24:26])
self.pq['hi'].append(self.linha[27:29])
self.pq['mi'].append(self.linha[30:31])
self.pq['df'].append(self.linha[32:34])
self.pq['hf'].append(self.linha[35:37])
self.pq['mf'].append(self.linha[38:39])
self.pq['geracao'].append(self.linha[40:50])
continue
if mne == 'secr':
self.secr['mne'].append(self.linha[:4])
self.secr['num'].append(self.linha[5:8])
self.secr['nome'].append(self.linha[9:21])
self.secr['usi_1'].append(self.linha[24:27])
self.secr['fator_1'].append(self.linha[28:33])
self.secr['usi_2'].append(self.linha[34:37])
self.secr['fator_2'].append(self.linha[38:43])
self.secr['usi_3'].append(self.linha[44:47])
self.secr['fator_3'].append(self.linha[48:53])
self.secr['usi_4'].append(self.linha[54:57])
self.secr['fator_4'].append(self.linha[58:63])
self.secr['usi_5'].append(self.linha[64:67])
self.secr['fator_5'].append(self.linha[68:73])
continue
if mne_sigla == 'cr':
self.cr['mne'].append(self.linha[:2])
self.cr['num'].append(self.linha[4:7])
self.cr['nome'].append(self.linha[9:21])
self.cr['gr'].append(self.linha[24:26])
self.cr['A0'].append(self.linha[27:42])
self.cr['A1'].append(self.linha[43:58])
self.cr['A2'].append(self.linha[59:74])
self.cr['A3'].append(self.linha[75:90])
self.cr['A4'].append(self.linha[91:106])
self.cr['A5'].append(self.linha[107:122])
self.cr['A6'].append(self.linha[123:138])
continue
if mne_sigla == 'r11':
self.r11['mne'].append(self.linha[:3])
self.r11['di'].append(self.linha[4:6])
self.r11['hi'].append(self.linha[7:9])
self.r11['mi'].append(self.linha[10:11])
self.r11['df'].append(self.linha[12:14])
self.r11['hf'].append(self.linha[15:17])
self.r11['mf'].append(self.linha[18:19])
self.r11['cotaIni'].append(self.linha[20:30])
self.r11['varhora'].append(self.linha[30:40])
self.r11['vardia'].append(self.linha[40:50])
self.r11['coef'].append(self.linha[59:164])
continue
if mne_sigla == 'vr':
self.vr['mne'].append(self.linha[:2])
self.vr['dia'].append(self.linha[4:6])
self.vr['mneumo_verao'].append(self.linha[9:12])
continue
if mne_sigla == 'pd':
self.pd['mne'].append(self.linha[:2])
self.pd['tol_perc'].append(self.linha[3:9])
self.pd['tol_MW'].append(self.linha[12:22])
continue
if mne_sigla == 'vm':
self.vm['mne'].append(self.linha[:2])
self.vm['ind'].append(self.linha[4:7])
self.vm['di'].append(self.linha[8:10])
self.vm['hi'].append(self.linha[11:13])
self.vm['mi'].append(self.linha[14:15])
self.vm['df'].append(self.linha[16:18])
self.vm['hf'].append(self.linha[19:21])
self.vm['mf'].append(self.linha[22:23])
self.vm['taxa_enchimento'].append(self.linha[24:34])
continue
if mne_sigla == 'df':
self.df['mne'].append(self.linha[:2])
self.df['ind'].append(self.linha[4:7])
self.df['di'].append(self.linha[8:10])
self.df['hi'].append(self.linha[11:13])
self.df['mi'].append(self.linha[14:15])
self.df['df'].append(self.linha[16:18])
self.df['hf'].append(self.linha[19:21])
self.df['mf'].append(self.linha[22:23])
self.df['taxa_descarga'].append(self.linha[24:34])
continue
if mne_sigla == 'me':
self.me['mne'].append(self.linha[:2])
self.me['ind'].append(self.linha[4:7])
self.me['di'].append(self.linha[8:10])
self.me['hi'].append(self.linha[11:13])
self.me['mi'].append(self.linha[14:15])
self.me['df'].append(self.linha[16:18])
self.me['hf'].append(self.linha[19:21])
self.me['mf'].append(self.linha[22:23])
self.me['fator'].append(self.linha[24:34])
continue
if mneumo == 'meta cjsist':
self.meta_cjsist['mneumo'].append(self.linha[:13])
self.meta_cjsist['ind'].append(self.linha[14:17])
self.meta_cjsist['nome'].append(self.linha[18:20])
continue
if mneumo == 'meta receb':
self.meta_sist['mne'].append(self.linha[:13])
self.meta_sist['ind'].append(self.linha[14:17])
self.meta_sist['tp'].append(self.linha[19:21])
self.meta_sist['num'].append(self.linha[22:23])
self.meta_sist['meta'].append(self.linha[24:34])
self.meta_sist['tol_MW'].append(self.linha[34:44])
self.meta_sist['tol_perc'].append(self.linha[44:54])
continue
if mneumo == 'meta gter':
self.meta_usit['mne'].append(self.linha[:13])
self.meta_usit['ind'].append(self.linha[14:17])
self.meta_usit['tp'].append(self.linha[19:21])
self.meta_usit['num'].append(self.linha[22:23])
self.meta_usit['meta'].append(self.linha[24:34])
self.meta_usit['tol_MW'].append(self.linha[34:44])
self.meta_usit['tol_perc'].append(self.linha[44:54])
continue
if mne_sigla == 'sh':
self.sh['mne'].append(self.linha[:2])
self.sh['flag_simul'].append(self.linha[4:5])
self.sh['flag_pl'].append(self.linha[9:10])
self.sh['num_min'].append(self.linha[14:17])
self.sh['num_max'].append(self.linha[19:22])
self.sh['flag_quebra'].append(self.linha[24:25])
self.sh['ind_1'].append(self.linha[29:32])
self.sh['ind_2'].append(self.linha[34:37])
self.sh['ind_3'].append(self.linha[39:42])
self.sh['ind_4'].append(self.linha[44:47])
self.sh['ind_5'].append(self.linha[49:52])
continue
if mne_sigla == 'tf':
self.tf['mne'].append(self.linha[:2])
self.tf['custo'].append(self.linha[4:14])
continue
if mne_sigla == 'rs':
self.rs['mne'].append(self.linha[:2])
self.rs['cod'].append(self.linha[3:6])
self.rs['ind'].append(self.linha[7:11])
self.rs['subs'].append(self.linha[12:16])
self.rs['tp'].append(self.linha[22:26])
self.rs['comentario'].append(self.linha[27:39])
continue
if mne_sigla == 'sp':
self.sp['mne'].append(self.linha[:2])
self.sp['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'ps':
self.ps['mne'].append(self.linha[:2])
self.ps['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'pp':
self.pp['mne'].append(self.linha[:2])
self.pp['flag'].append(self.linha[3:4])
self.pp['iteracoes'].append(self.linha[5:8])
self.pp['num'].append(self.linha[9:12])
self.pp['tp'].append(self.linha[13:14])
continue
except Exception as err:
if isinstance(err, StopIteration):
self.bloco_tm['df'] = pd.DataFrame(self.tm)
self.bloco_sist['df'] = pd.DataFrame(self.sist)
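# --- Usage sketch (added for illustration; not part of the original class) ---
# Assumes a DESSEM 'entdados' deck is available locally; the file name below is a placeholder.
# After ler() runs, each record type is exposed through its per-mnemonic dictionary of columns.
if __name__ == '__main__':
    dadger = Dadger()
    dadger.ler('entdados.dat')                      # placeholder path
    print(dadger.uh['nome'][:5])                    # first hydro plant names read from the UH records
    print(len(dadger.ut['num']), 'UT (thermal) records parsed')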
import pandas as pd
import numpy as np
import os
import datetime
import requests
from tqdm import tqdm
from collections import Counter
import joblib
import os
# TODO: re-implement pulling data from the internet by checking for all days
# and downloading only what it needs, and put that in the load_data module
# so it happens automatically whenever you load the data, rather
# than having to do it manually here.
#####
# Step 1: Update counts data
#####
# from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
# # get today's date
# yesterdays_date_str = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
# print(f'Yesterday: {yesterdays_date_str}')
# yesterdays_date_str_for_JHU_data = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%m-%d-%Y')
# print(f'Yesterday: {yesterdays_date_str}')
#
# url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{yesterdays_date_str_for_JHU_data}.csv"
# r = requests.get(url, allow_redirects=True)
# with open(f'source_data/csse_covid_19_daily_reports/{yesterdays_date_str_for_JHU_data}.csv', 'w') as f:
# f.write(r.content.decode("utf-8"))
if not os.path.exists('loaded_data'):
os.mkdir('loaded_data')
today_str = datetime.datetime.today().strftime('%Y-%m-%d')
loaded_data_filename = os.path.join('loaded_data', today_str) + '.joblib'
success = False
try:
print(f'Loading {loaded_data_filename}...')
tmp_dict = joblib.load(loaded_data_filename)
map_state_to_series = tmp_dict['map_state_to_series']
current_cases_ranked_us_states = tmp_dict['current_cases_ranked_us_states']
current_cases_ranked_non_us_states = tmp_dict['current_cases_ranked_non_us_states']
current_cases_ranked_non_us_provinces = tmp_dict['current_cases_ranked_non_us_provinces']
current_cases_ranked_us_counties = tmp_dict['current_cases_ranked_us_counties']
map_state_to_current_case_cnt = tmp_dict['map_state_to_current_case_cnt']
map_state_to_fips = tmp_dict['map_state_to_fips']
print('...done!')
success = True
except Exception:
print('...loading failed!')
if not success:
# don't download on the server
if os.environ['PWD'] != '/home/data/src/covid_model':
url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv"
r = requests.get(url, allow_redirects=True)
with open('source_data/states.csv', 'w') as f:
f.write(r.content.decode("utf-8"))
url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
r = requests.get(url, allow_redirects=True)
with open('source_data/counties.csv', 'w') as f:
f.write(r.content.decode("utf-8"))
print('Downloading last month of data if not available')
for days_back in tqdm(range(1, 28)):
date = datetime.date.today() - datetime.timedelta(days=days_back)
date_str = date.strftime('%m-%d-%Y')
url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date_str}.csv"
filename = f'source_data/csse_covid_19_daily_reports/{date_str}.csv'
if not os.path.exists(filename):
r = requests.get(url, allow_redirects=True)
print(filename, len(r.content.decode("utf-8")))
with open(filename, 'w') as f:
f.write(r.content.decode("utf-8"))
#####
# Step 1: Get US Data States
#####
print('Processing U.S. States...')
data_dir = 'source_data'
us_full_count_data = pd.read_csv(os.path.join(data_dir, 'states.csv'))
# from https://github.com/nytimes/covid-19-data
# curl https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv
us_full_count_data['date'] = us_full_count_data['date'].astype('datetime64[ns]')
us_full_count_data['state_orig'] = us_full_count_data['state']
us_full_count_data['state'] = [f'US: {us_full_count_data.iloc[i]["state"]}' for i in range(len(us_full_count_data))]
us_full_count_data.rename(columns={'cases': 'positive', 'deaths': 'deceased'},
inplace=True)
quick_grab_tuples = list(
set(zip(*[us_full_count_data[col] for col in ['state', 'state_orig', 'fips']])))
map_state_to_fips = {tmp_tuple[0]: tmp_tuple[2] for tmp_tuple in quick_grab_tuples}
us_full_count_data = us_full_count_data[['date', 'state', 'positive', 'deceased']]
# get totals across U.S.
list_of_dict_totals = list()
for date in sorted(set(us_full_count_data['date'])):
date_iloc = [i for i, x in enumerate(us_full_count_data['date']) if x == date]
sum_cases = sum(us_full_count_data.iloc[date_iloc]['positive'])
sum_deaths = sum(us_full_count_data.iloc[date_iloc]['deceased'])
list_of_dict_totals.append({'date': date, 'positive': sum_cases, 'deceased': sum_deaths, 'state': 'US: total'})
us_total_counts_data = pd.DataFrame(list_of_dict_totals)
us_full_count_data = us_full_count_data.append(us_total_counts_data, ignore_index=True)
us_states = sorted(set(us_full_count_data['state']))
#####
# Step 1b: Get US Data Counties
#####
print('Processing U.S. Counties...')
data_dir = 'source_data'
us_county_full_data = pd.read_csv(os.path.join(data_dir, 'counties.csv'))
# from https://github.com/nytimes/covid-19-data
# curl https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv
us_county_full_data['date'] = us_county_full_data['date'].astype('datetime64[ns]')
us_county_full_data['state_orig'] = us_county_full_data['state']
us_county_full_data['state'] = [
f'US: {state}: {county}' for state, county in zip(us_county_full_data['state'], us_county_full_data['county'])]
quick_grab_tuples = list(
set(zip(*[us_county_full_data[col] for col in ['state', 'state_orig', 'county', 'fips']])))
tmp_map_state_to_fips = {tmp_tuple[0]: tmp_tuple[3] for tmp_tuple in quick_grab_tuples}
map_state_to_fips.update(tmp_map_state_to_fips)
us_county_full_data.rename(columns={'cases': 'positive', 'deaths': 'deceased'},
inplace=True)
us_county_full_data = us_county_full_data[['date', 'state', 'positive', 'deceased']]
us_counties = sorted(set(us_county_full_data['state']))
us_full_count_data = pd.concat([us_full_count_data, us_county_full_data])
######
# Step 2a: Get International Data Nations
######
print('Processing non-U.S. Nations...')
data_dir = os.path.join('source_data', 'csse_covid_19_daily_reports')
onlyfiles = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
list_of_small_dataframes = list()
for file in tqdm(sorted(onlyfiles)):
if not file.endswith('.csv'):
continue
full_filename = os.path.join(data_dir, file)
tmp_count_data = pd.read_csv(os.path.join(data_dir, file))
tmp_count_data.rename(columns={'Country_Region': 'Country/Region', 'Province_State': 'Province/State'},
inplace=True)
print(f'processing file {full_filename} with {len(tmp_count_data)} rows...')
tmp_count_data['date'] = datetime.datetime.strptime(file[:-4], '%m-%d-%Y')
list_of_small_dataframes.append(tmp_count_data)
# Filter out data associated with provinces
full_count_data = pd.concat(list_of_small_dataframes)
# null_provice_inds = [i for i, x in enumerate(full_count_data['Province/State']) if type(x) != str]
# full_count_data = full_count_data.iloc[null_provice_inds]
full_count_data = full_count_data.groupby(['date', 'Country/Region'])[['Confirmed', 'Deaths']].sum().reset_index()
full_count_data.rename(columns={'Country/Region': 'state', 'Confirmed': 'positive', 'Deaths': 'deceased'},
inplace=True)
# get totals across U.S. (again)
# us_total_counts_data['state'] = 'United States'
# full_count_data = full_count_data.append(us_total_counts_data, ignore_index=True)
non_us_countries = sorted(set(full_count_data['state']))
full_count_data = pd.concat([full_count_data, us_full_count_data])
######
# Step 2b: Get International Data Provinces
######
print('Processing non-U.S. Provinces...')
data_dir = os.path.join('source_data', 'csse_covid_19_daily_reports')
onlyfiles = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
list_of_small_dataframes = list()
for file in tqdm(sorted(onlyfiles)):
if not file.endswith('.csv'):
continue
full_filename = os.path.join(data_dir, file)
tmp_count_data = pd.read_csv(os.path.join(data_dir, file))
tmp_count_data.rename(columns={'Country_Region': 'Country/Region', 'Province_State': 'Province/State'},
inplace=True)
print(f'processing file {full_filename} with {len(tmp_count_data)} rows...')
tmp_count_data['date'] = datetime.datetime.strptime(file[:-4], '%m-%d-%Y')
list_of_small_dataframes.append(tmp_count_data)
# Filter out data associated with provinces
province_full_data = pd.concat(list_of_small_dataframes)
# null_provice_inds = [i for i, x in enumerate(full_count_data['Province/State']) if type(x) != str]
# full_count_data = full_count_data.iloc[null_provice_inds]
province_full_data = province_full_data.groupby(['date', 'Country/Region', 'Province/State'])[
['Confirmed', 'Deaths']].sum().reset_index()
province_full_data['state'] = [f'{country}: {province}' for country, province in
zip(province_full_data['Country/Region'], province_full_data['Province/State'])]
province_full_data.rename(columns={'Confirmed': 'positive', 'Deaths': 'deceased'},
inplace=True)
# get totals across U.S. (again)
# us_total_counts_data['state'] = 'United States'
# full_count_data = full_count_data.append(us_total_counts_data, ignore_index=True)
non_us_provinces = sorted(set(province_full_data['state']))
full_count_data = pd.concat([full_count_data, province_full_data])
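# --- Illustration (added, not part of the original script) ---
# The combined frame built above keeps one row per (date, region) with 'positive' and 'deceased'
# counts, which pivots cleanly into per-region time series. Self-contained sketch with toy values
# (column names mirror full_count_data; the numbers are made up):
def _pivot_example():
    toy = pd.DataFrame({
        'date': pd.to_datetime(['2020-03-01', '2020-03-01', '2020-03-02', '2020-03-02']),
        'state': ['US: total', 'Canada', 'US: total', 'Canada'],
        'positive': [100, 50, 150, 70],
        'deceased': [1, 0, 2, 1],
    })
    # one column per region, one row per date
    return toy.pivot_table(index='date', columns='state', values='positive', aggfunc='sum')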
from data_model import *
import numpy as np
import pandas as pd
# just a copy of the imports used over multiple files
import pandas as pd
import numpy as np
import matplotlib.pyplot as pyplot
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import accuracy_score
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_val_predict
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import logistic
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from matplotlib import pyplot
from sklearn.preprocessing import StandardScaler
import requests
url = 'https://data.cyverse.org/dav-anon/iplant/home/nirav/ACIC-2021/Dataset_Competition_Zip_File.zip'
r = requests.get(url, allow_redirects=True)
open('Dataset_Competition_Zip_File.zip', 'wb').write(r.content)
import zipfile
with zipfile.ZipFile('Dataset_Competition_Zip_File.zip', 'r') as zip_ref:
zip_ref.extractall('.')
inputs_other = np.load(r'Dataset_Competition\Training\inputs_others_train.npy')
yield_train = np.load(r'Dataset_Competition\Training\yield_train.npy')
inputs_weather_train = np.load(r'Dataset_Competition\Training\inputs_weather_train.npy')
clusterID_genotype = np.load(r'Dataset_Competition\clusterID_genotype.npy')
'''
This function is used to make a model
based on a decision tree that builds a tree for each of the
days and then uses this to make a prediction over each day
'''
def DecsisionTree(data,day):
data_set = data
data_set.head(12)
y = data_set['yield']
x = data_set['AvgSur']
y = (y.to_numpy()).reshape(-1, 1)
x = (x.to_numpy()).reshape(-1, 1)
#['ADNI','AP','ARH','MDNI','MaxSur','MinSur',]
test_size = 0.5
seed = 5
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = test_size, random_state = seed)
model = DecisionTreeRegressor()
model.fit(x_train, y_train)
predictions = model.predict(x_train)
predictions = model.predict(x_test)
x_train = pd.DataFrame(x_train)
y_train = pd.DataFrame(y_train)
x_test = pd.DataFrame(x_test)
y_test = pd.DataFrame(y_test)
test_other = np.load(r'Dataset_Competition\Test Inputs\inputs_others_test.npy')
inputs_weather_test = np.load(r'Dataset_Competition\Test Inputs\inputs_weather_test.npy')
sample_two = pull_one_day(inputs_weather_test,day,10337)
sample_two = pd.DataFrame(sample_two)
test_other = | pd.DataFrame(test_other) | pandas.DataFrame |
import natsort
import numpy as np
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import re
import traceback
from io import BytesIO
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise as pw
import json
import statistics
import matplotlib.pyplot as plt
import matplotlib_venn as venn
from matplotlib_venn import venn2, venn3, venn3_circles
from PIL import Image
from upsetplot import from_memberships
from upsetplot import plot as upplot
import pkg_resources
def natsort_index_keys(x):
order = natsort.natsorted(np.unique(x.values))
return pd.Index([order.index(el) for el in x], name=x.name)
def natsort_list_keys(x):
order = natsort.natsorted(np.unique(x))
return [order.index(el) for el in x]
class SpatialDataSet:
regex = {
"imported_columns": "^[Rr]atio H/L (?!normalized|type|is.*|variability|count)[^ ]+|^Ratio H/L variability.... .+|^Ratio H/L count .+|id$|[Mm][Ss].*[cC]ount.+$|[Ll][Ff][Qq].*|.*[nN]ames.*|.*[Pp][rR]otein.[Ii][Dd]s.*|[Pp]otential.[cC]ontaminant|[Oo]nly.[iI]dentified.[bB]y.[sS]ite|[Rr]everse|[Ss]core|[Qq]-[Vv]alue|R.Condition|PG.Genes|PG.ProteinGroups|PG.Cscore|PG.Qvalue|PG.RunEvidenceCount|PG.Quantity|^Proteins$|^Sequence$"
}
acquisition_set_dict = {
"LFQ6 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - Spectronaut" : ["LFQ intensity", "MS/MS count"],
"LFQ5 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"LFQ6 - MQ" : ["[Ll][Ff][Qq].[Ii]ntensity", "[Mm][Ss]/[Mm][Ss].[cC]ount", "[Ii]ntensity"],
"SILAC - MQ" : [ "[Rr]atio.[Hh]/[Ll](?!.[Vv]aria|.[Cc]ount)","[Rr]atio.[Hh]/[Ll].[Vv]ariability.\[%\]", "[Rr]atio.[Hh]/[Ll].[cC]ount"],
"Custom": ["(?!Protein IDs|Gene names)"]
}
Spectronaut_columnRenaming = {
"R.Condition": "Map", "PG.Genes" : "Gene names", "PG.Qvalue": "Q-value", "PG.Cscore":"C-Score",
"PG.ProteinGroups" : "Protein IDs", "PG.RunEvidenceCount" : "MS/MS count", "PG.Quantity" : "LFQ intensity"
}
css_color = ["#b2df8a", "#6a3d9a", "#e31a1c", "#b15928", "#fdbf6f", "#ff7f00", "#cab2d6", "#fb9a99", "#1f78b4", "#ffff99", "#a6cee3",
"#33a02c", "blue", "orange", "goldenrod", "lightcoral", "magenta", "brown", "lightpink", "red", "turquoise",
"khaki", "darkgoldenrod","darkturquoise", "darkviolet", "greenyellow", "darksalmon", "hotpink", "indianred", "indigo","darkolivegreen",
"coral", "aqua", "beige", "bisque", "black", "blanchedalmond", "blueviolet", "burlywood", "cadetblue", "yellowgreen", "chartreuse",
"chocolate", "cornflowerblue", "cornsilk", "darkblue", "darkcyan", "darkgray", "darkgrey", "darkgreen", "darkkhaki", "darkmagenta",
"darkorange", "darkorchid", "darkred", "darkseagreen", "darkslateblue", "snow", "springgreen", "darkslategrey", "mediumpurple", "oldlace",
"olive", "lightseagreen", "deeppink", "deepskyblue", "dimgray", "dimgrey", "dodgerblue", "firebrick", "floralwhite", "forestgreen",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"fuchsia", "gainsboro", "ghostwhite", "gold", "gray", "ivory", "lavenderblush", "lawngreen", "lemonchiffon", "lightblue", "lightcyan",
"lightgoldenrodyellow", "lightgray", "lightgrey", "lightgreen", "lightsalmon", "lightskyblue", "lightslategray", "lightslategrey",
"lightsteelblue", "lightyellow", "lime", "limegreen", "linen", "maroon", "mediumaquamarine", "mediumblue", "mediumseagreen",
"mediumslateblue", "mediumspringgreen", "mediumturquoise", "mediumvioletred", "midnightblue", "mintcream", "mistyrose", "moccasin",
"olivedrab", "orangered", "orchid", "palegoldenrod", "palegreen", "paleturquoise", "palevioletred", "papayawhip", "peachpuff", "peru",
"pink", "plum", "powderblue", "rosybrown", "royalblue", "saddlebrown", "salmon", "sandybrown", "seagreen", "seashell", "sienna", "silver",
"skyblue", "slateblue", "steelblue", "teal", "thistle", "tomato", "violet", "wheat", "white", "whitesmoke", "slategray", "slategrey",
"aquamarine", "azure","crimson", "cyan", "darkslategray", "grey","mediumorchid","navajowhite", "navy"]
analysed_datasets_dict = {}
df_organellarMarkerSet = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/organellemarkers/{}.csv'.format("Homo sapiens - Uniprot")),
usecols=lambda x: bool(re.match("Gene name|Compartment", x)))
df_organellarMarkerSet = df_organellarMarkerSet.rename(columns={"Gene name":"Gene names"})
df_organellarMarkerSet = df_organellarMarkerSet.astype({"Gene names": "str"})
def __init__(self, filename, expname, acquisition, comment, name_pattern="e.g.:.* (?P<cond>.*)_(?P<rep>.*)_(?P<frac>.*)", reannotate_genes=False, **kwargs):
self.filename = filename
self.expname = expname
self.acquisition = acquisition
self.name_pattern = name_pattern
self.comment = comment
self.imported_columns = self.regex["imported_columns"]
self.fractions, self.map_names = [], []
self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
if acquisition == "SILAC - MQ":
if "RatioHLcount" not in kwargs.keys():
self.RatioHLcount = 2
else:
self.RatioHLcount = kwargs["RatioHLcount"]
del kwargs["RatioHLcount"]
if "RatioVariability" not in kwargs.keys():
self.RatioVariability = 30
else:
self.RatioVariability = kwargs["RatioVariability"]
del kwargs["RatioVariability"]
elif acquisition == "Custom":
self.custom_columns = kwargs["custom_columns"]
self.custom_normalized = kwargs["custom_normalized"]
self.imported_columns = "^"+"$|^".join(["$|^".join(el) if type(el) == list else el for el in self.custom_columns.values() if el not in [[], None, ""]])+"$"
#elif acquisition == "LFQ5 - MQ" or acquisition == "LFQ6 - MQ" or acquisition == "LFQ6 - Spectronaut" or acquisition == "LFQ5 - Spectronaut":
else:
if "summed_MSMS_counts" not in kwargs.keys():
self.summed_MSMS_counts = 2
else:
self.summed_MSMS_counts = kwargs["summed_MSMS_counts"]
del kwargs["summed_MSMS_counts"]
if "consecutiveLFQi" not in kwargs.keys():
self.consecutiveLFQi = 4
else:
self.consecutiveLFQi = kwargs["consecutiveLFQi"]
del kwargs["consecutiveLFQi"]
#self.markerset_or_cluster = False if "markerset_or_cluster" not in kwargs.keys() else kwargs["markerset_or_cluster"]
if "organism" not in kwargs.keys():
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format("Homo sapiens - Uniprot")))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
else:
assert kwargs["organism"]+".csv" in pkg_resources.resource_listdir(__name__, "annotations/complexes")
marker_table = pd.read_csv(pkg_resources.resource_stream(__name__, 'annotations/complexes/{}.csv'.format(kwargs["organism"])))
self.markerproteins = {k: v.replace(" ", "").split(",") for k,v in zip(marker_table["Cluster"], marker_table["Members - Gene names"])}
self.organism = kwargs["organism"]
del kwargs["organism"]
self.analysed_datasets_dict = {}
self.analysis_summary_dict = {}
def data_reading(self, filename=None, content=None):
"""
Data import. Can read the df_original from a file or buffer.
df_original contains all information of the raw file; tab separated file is imported,
Args:
self:
filename: string
imported_columns : regular expression; columns that match this regular expression will be imported
filename: default None, to use the class attribute. Otherwise overwrites the class attribute upon success.
content: default None, to use the filename. Any valid input to pd.read_csv can be provided, e.g. a StringIO buffer.
Returns:
self.df_original: raw, unprocessed dataframe, single level column index
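Example (sketch; file name, experiment name and acquisition are placeholders):
    ds = SpatialDataSet("proteinGroups.txt", "Experiment 1", "LFQ6 - MQ", comment="")
    df_raw = ds.data_reading()
    # or from an in-memory buffer:
    # df_raw = ds.data_reading(filename="proteinGroups.txt", content=io.StringIO(text))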
"""
# use instance attribute if no filename is provided
if filename is None:
filename = self.filename
# if no buffer is provided for the content read straight from the file
if content is None:
content = filename
if filename.endswith("xls") or filename.endswith("txt"):
self.df_original = pd.read_csv(content, sep="\t", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
else: #assuming csv file
self.df_original = pd.read_csv(content, sep=",", comment="#", usecols=lambda x: bool(re.match(self.imported_columns, x)), low_memory = True)
assert self.df_original.shape[0]>10 and self.df_original.shape[1]>5
self.filename = filename
return self.df_original
def processingdf(self, name_pattern=None, summed_MSMS_counts=None, consecutiveLFQi=None, RatioHLcount=None, RatioVariability=None, custom_columns=None, custom_normalized=None):
"""
Analysis of the SILAC/LFQ-MQ/LFQ-Spectronaut data will be performed. The dataframe will be filtered, normalized, and converted into a dataframe
characterized by a flat column index. These tasks are performed by the following functions:
indexingdf(df_original, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
spectronaut_LFQ_indexingdf(df_original, Spectronaut_columnRenaming, acquisition_set_dict, acquisition, fraction_dict, name_pattern)
stringency_silac(df_index)
normalization_01_silac(df_stringency_mapfracstacked):
logarithmization_silac(df_stringency_mapfracstacked):
stringency_lfq(df_index):
normalization_01_lfq(df_stringency_mapfracstacked):
logarithmization_lfq(df_stringency_mapfracstacked):
Args:
self.acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
additional arguments can be used to override the value set by the class init function
Returns:
self:
map_names: list of Map names
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
df_log_stacked: df; log transformed data
analysis_summary_dict["0/1 normalized data - mean"] : 0/1 normalized data across all maps by calculating the mean
["changes in shape after filtering"]
["Unique Proteins"] : unique proteins, derived from the first entry of Protein IDs, seperated by a ";"
["Analysis parameters"] : {"acquisition" : ...,
"filename" : ...,
#SILAC#
"Ratio H/L count 1 (>=X)" : ...,
"Ratio H/L count 2 (>=Y, var<Z)" : ...,
"Ratio variability (<Z, count>=Y)" : ...
#LFQ#
"consecutive data points" : ...,
"summed MS/MS counts" : ...
}
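Example (sketch; assumes data_reading() has already been called on this instance):
    ds.processingdf()
    ds.df_01_stacked.head()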
"""
if name_pattern is None:
name_pattern = self.name_pattern
if self.acquisition == "SILAC - MQ":
if RatioHLcount is None:
RatioHLcount = self.RatioHLcount
if RatioVariability is None:
RatioVariability = self.RatioVariability
elif self.acquisition == "Custom":
if custom_columns is None:
custom_columns = self.custom_columns
if custom_normalized is None:
custom_normalized = self.custom_normalized
else:
if summed_MSMS_counts is None:
summed_MSMS_counts = self.summed_MSMS_counts
if consecutiveLFQi is None:
consecutiveLFQi = self.consecutiveLFQi
shape_dict = {}
def indexingdf():
"""
For data output from MaxQuant, all columns - except of "MS/MS count" and "LFQ intensity" (LFQ) | "Ratio H/L count", "Ratio H/L variability [%]"
(SILAC) - will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count", "LFQ intensity"| "Ratio H/L count", "Ratio H/L
variability [%]"), "Fraction" (= defined via "name_pattern") and "Map" (= defined via "name_pattern") as level names, allowing the stacking and
unstacking of the dataframe. The dataframe will be filtered by removing matches to the reverse database, matches only identified by site, and
potential contaminants.
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, one of "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_original
shape_dict["Shape after categorical filtering"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_original.rename({"Proteins": "Protein IDs"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# the multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
[[re.findall(s, col)[0] for s in self.acquisition_set_dict[self.acquisition] if re.match(s,col)][0]
for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
try:
df_index = df_original.xs(
np.nan, 0, "Reverse")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Potential contaminant")
except:
pass
try:
df_index = df_index.xs(
np.nan, 0, "Only identified by site")
except:
pass
df_index.replace(0, np.nan, inplace=True)
shape_dict["Shape after categorical filtering"] = df_index.shape
df_index.rename(columns={"MS/MS Count":"MS/MS count"}, inplace=True)
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
############## Cyt should only be removed if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - MQ":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
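# Sketch of the resulting column MultiIndex after indexingdf (map and fraction names are
# illustrative and depend on name_pattern):
#   Set:      LFQ intensity  LFQ intensity  ...  MS/MS count
#   Map:      Map1           Map1           ...  Map2
#   Fraction: 03K            06K            ...  80K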
def custom_indexing_and_normalization():
df_original = self.df_original.copy()
df_original.rename({custom_columns["ids"]: "Protein IDs", custom_columns["genes"]: "Gene names"}, axis=1, inplace=True)
df_original = df_original.set_index([col for col in df_original.columns
if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]]) == False])
# the multiindex will be generated by extracting the information about the Map, Fraction and Type from each individual column name
multiindex = pd.MultiIndex.from_arrays(
arrays=[
["normalized profile" for col in df_original.columns],
[re.match(self.name_pattern, col).group("rep") for col in df_original.columns] if not "<cond>" in self.name_pattern
else ["_".join(re.match(self.name_pattern, col).group("cond", "rep")) for col in df_original.columns],
[re.match(self.name_pattern, col).group("frac") for col in df_original.columns],
],
names=["Set", "Map", "Fraction"]
)
df_original.columns = multiindex
df_original.sort_index(axis=1, inplace=True)
shape_dict["Original size"] = df_original.shape
# for custom upload assume full normalization for now. this should be extended to valid value filtering and 0-1 normalization later
df_index = df_original.copy()
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def spectronaut_LFQ_indexingdf():
"""
For data generated with the Spectronaut software, columns will be renamed so that they fit the scheme of MaxQuant output data. Subsequently, all
columns - except of "MS/MS count" and "LFQ intensity" will be set as index. A multiindex will be generated, containing "Set" ("MS/MS count" and
"LFQ intensity"), Fraction" and "Map" (= defined via "name_pattern"; both based on the column name R.condition - equivalent to the column name "Map"
in df_renamed["Map"]) as level labels.
!!!
!!!It is very important to define R.Fraction, R.condition already during the setup of Spectronaut!!!
!!!
Args:
self:
df_original: dataframe, columns defined through self.imported_columns
Spectronaut_columnRenaming
acquisition_set_dict: dictionary, all columns will be set as index, except of those that are listed in acquisition_set_dict
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut"
fraction_dict: "Fraction" is part of the multiindex; fraction_dict allows the renaming of the fractions e.g. 3K -> 03K
name_pattern: regular expression, to identify Map-Fraction-(Replicate)
Returns:
self:
df_index: multiindex dataframe, which contains 3 level labels: Map, Fraction, Type
shape_dict["Original size"] of df_index
fractions: list of fractions e.g. ["01K", "03K", ...]
"""
df_original = self.df_original.copy()
df_renamed = df_original.rename(columns=self.Spectronaut_columnRenaming)
df_renamed["Fraction"] = [re.match(self.name_pattern, i).group("frac") for i in df_renamed["Map"]]
df_renamed["Map"] = [re.match(self.name_pattern, i).group("rep") for i in df_renamed["Map"]] if not "<cond>" in self.name_pattern else ["_".join(
re.match(self.name_pattern, i).group("cond", "rep")) for i in df_renamed["Map"]]
df_index = df_renamed.set_index([col for col in df_renamed.columns if any([re.match(s, col) for s in self.acquisition_set_dict[self.acquisition]])==False])
df_index.columns.names = ["Set"]
# In case fractionated data was used, this needs to be caught and aggregated
try:
df_index = df_index.unstack(["Map", "Fraction"])
except ValueError:
df_index = df_index.groupby(by=df_index.index.names).agg(np.nansum, axis=0)
df_index = df_index.unstack(["Map", "Fraction"])
df_index.replace(0, np.nan, inplace=True)
shape_dict["Original size"]=df_index.shape
fraction_wCyt = list(df_index.columns.get_level_values("Fraction").unique())
#Cyt is removed only if it is not an NMC split
if "Cyt" in fraction_wCyt and len(fraction_wCyt) >= 4:
df_index.drop("Cyt", axis=1, level="Fraction", inplace=True)
try:
if self.acquisition == "LFQ5 - Spectronaut":
df_index.drop("01K", axis=1, level="Fraction", inplace=True)
except:
pass
self.fractions = natsort.natsorted(list(df_index.columns.get_level_values("Fraction").unique()))
self.df_index = df_index
return df_index
def stringency_silac(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only proteins with complete profiles are considered (e.g. a set of 5 SILAC ratios
if there are 5 fractions; any protein with missing values is rejected). Proteins are retained with 3 or more quantifications in each
subfraction (=count). Furthermore, proteins with only 2 quantification events in one or more subfractions are retained if the ratio variability
of those ratios is below 30% (=var). SILAC ratios are normalized by division by the fraction median (normalization to SILAC loading).
Data is annotated based on the specified marker set, e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
RatioHLcount: int, 2
RatioVariability: int, 30
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
shape_dict["Shape after Ratio H/L count (>=3)/var (count>=2, var<30) filtering"] of df_countvarfiltered_stacked
shape_dict["Shape after filtering for complete profiles"] of df_stringency_mapfracstacked
"""
# Fraction and Map will be stacked
df_stack = df_index.stack(["Fraction", "Map"])
# filtering for a sufficient number of quantifications (count in "Ratio H/L count"), taking variability (var in Ratio H/L variability [%]) into account
# zip: allows direct comparison of count and var
# only if the filtering parameters are fulfilled the data will be introduced into df_countvarfiltered_stacked
#default setting: RatioHLcount = 2 ; RatioVariability = 30
df_countvarfiltered_stacked = df_stack.loc[[count>RatioHLcount or (count==RatioHLcount and var<RatioVariability)
for var, count in zip(df_stack["Ratio H/L variability [%]"], df_stack["Ratio H/L count"])]]
shape_dict["Shape after Ratio H/L count (>=3)/var (count==2, var<30) filtering"] = df_countvarfiltered_stacked.unstack(["Fraction", "Map"]).shape
# "Ratio H/L":normalization to SILAC loading, each individual experiment (FractionXMap) will be divided by its median
# np.median([...]): only entries, that are not NANs are considered
df_normsilac_stacked = df_countvarfiltered_stacked["Ratio H/L"]\
.unstack(["Fraction", "Map"])\
.apply(lambda x: x/np.nanmedian(x), axis=0)\
.stack(["Map", "Fraction"])
df_stringency_mapfracstacked = df_countvarfiltered_stacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_normsilac_stacked, columns=["Ratio H/L"]))
# dataframe is grouped (Map, id), that allows the filtering for complete profiles
df_stringency_mapfracstacked = df_stringency_mapfracstacked.groupby(["Map", "id"]).filter(lambda x: len(x)>=len(self.fractions))
shape_dict["Shape after filtering for complete profiles"]=df_stringency_mapfracstacked.unstack(["Fraction", "Map"]).shape
# Ratio H/L is converted into Ratio L/H
df_stringency_mapfracstacked["Ratio H/L"] = df_stringency_mapfracstacked["Ratio H/L"].transform(lambda x: 1/x)
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c not in ["Ratio H/L count","Ratio H/L variability [%]","Ratio H/L"]], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
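# Filter at a glance (defaults RatioHLcount=2, RatioVariability=30): a ratio with count 3 or more
# always passes, count 2 passes only if its variability is below 30%, count 1 is always rejected.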
def normalization_01_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" are stacked;
columns "Ratio H/L count", "Ratio H/L variability [%]", and "Ratio H/L" stored as single level indices
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
data_completeness: series, for each individual map, as well as combined maps: 1 - (percentage of NANs)
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "Ratio H/L" is 0-1 normalized and renamed to "normalized
profile"; the columns "Ratio H/L count", "Ratio H/L variability [%]", and "normalized profile" stored as single level indices;
plotting is possible now
self:
analysis_summary_dict["Data/Profile Completeness"] : df, with information about Data/Profile Completeness
column: "Experiment", "Map", "Data completeness", "Profile completeness"
no row index
"""
df_01norm_unstacked = df_stringency_mapfracstacked["Ratio H/L"].unstack("Fraction")
# 0:1 normalization of Ratio L/H
df_01norm_unstacked = df_01norm_unstacked.div(df_01norm_unstacked.sum(axis=1), axis=0)
df_01_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(pd.DataFrame
(df_01norm_unstacked.stack("Fraction"),columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "normalized profile"
df_01_stacked.columns = [col if col!="Ratio H/L" else "normalized profile" for col in df_01_stacked.columns]
return df_01_stacked
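# 0-1 normalization at a glance: each profile is divided by its row sum, so the fractions of one
# protein in one map add up to 1 (e.g. a hypothetical profile [2, 1, 1] becomes [0.5, 0.25, 0.25]).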
def logarithmization_silac(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("Ratio H/L").
Args:
df_stringency_mapfracstacked: dataframe, in which "MAP" and "Fraction" are stacked; the columns "Ratio H/L count", "Ratio H/L variability [%]",
and "Ratio H/L" stored as single level indices
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "Ratio H/L"
data; the columns "Ratio H/L count", "Ratio H/L variability [%]" and "log profile" are stored as single level indices;
PCA is possible now
"""
# logarithmizing, basis of 2
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["Ratio H/L"].transform(np.log2)
df_log_stacked = df_stringency_mapfracstacked[["Ratio H/L count", "Ratio H/L variability [%]"]].join(
pd.DataFrame(df_lognorm_ratio_stacked, columns=["Ratio H/L"]))
# "Ratio H/L" will be renamed to "log profile"
df_log_stacked.columns = [col if col !="Ratio H/L" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def stringency_lfq(df_index):
"""
The multiindex dataframe is subjected to stringency filtering. Only Proteins which were identified with
at least [4] consecutive data points regarding the "LFQ intensity", and if summed MS/MS counts >= n(fractions)*[2]
(LFQ5: min 10 and LFQ6: min 12, respectively; coverage filtering) were included.
Data is annotated based on specified marker set e.g. eLife.
Args:
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Type
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
fractions: list of fractions e.g. ["01K", "03K", ...]
summed_MSMS_counts: int, 2
consecutiveLFQi: int, 4
Returns:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
shape_dict["Shape after MS/MS value filtering"] of df_mscount_mapstacked
shape_dict["Shape after consecutive value filtering"] of df_stringency_mapfracstacked
"""
df_index = df_index.stack("Map")
# sorting the level 0, in order to have LFQ intensity - MS/MS count instead of continuous alternation
df_index.sort_index(axis=1, level=0, inplace=True)
# "MS/MS count"-column: take the sum over the fractions; if the sum is larger than n[fraction]*2, it will be stored in the new dataframe
minms = (len(self.fractions) * self.summed_MSMS_counts)
if minms > 0:
df_mscount_mapstacked = df_index.loc[df_index[("MS/MS count")].apply(np.sum, axis=1) >= minms]
shape_dict["Shape after MS/MS value filtering"]=df_mscount_mapstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_mscount_mapstacked.copy()
else:
df_stringency_mapfracstacked = df_index.copy()
# a series (not a dataframe) is generated; if there are at least e.g. 4 consecutive non-NaN values, the data will be retained
df_stringency_mapfracstacked.sort_index(level="Fraction", axis=1, key=natsort_index_keys, inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.loc[
df_stringency_mapfracstacked[("LFQ intensity")]\
.apply(lambda x: np.isfinite(x), axis=0)\
.apply(lambda x: sum(x) >= self.consecutiveLFQi and any(x.rolling(window=self.consecutiveLFQi).sum() >= self.consecutiveLFQi), axis=1)]
shape_dict["Shape after consecutive value filtering"]=df_stringency_mapfracstacked.unstack("Map").shape
df_stringency_mapfracstacked = df_stringency_mapfracstacked.copy().stack("Fraction")
#Annotation with marker genes
df_organellarMarkerSet = self.df_organellarMarkerSet
df_stringency_mapfracstacked.reset_index(inplace=True)
df_stringency_mapfracstacked = df_stringency_mapfracstacked.merge(df_organellarMarkerSet, how="left", on="Gene names")
df_stringency_mapfracstacked.set_index([c for c in df_stringency_mapfracstacked.columns
if c!="MS/MS count" and c!="LFQ intensity"], inplace=True)
df_stringency_mapfracstacked.rename(index={np.nan : "undefined"}, level="Compartment", inplace=True)
return df_stringency_mapfracstacked
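# Consecutive-value filter at a glance (consecutiveLFQi=4, x = finite value, hypothetical pattern):
#   [x, x, x, x, NaN, NaN]  passes   (a window of 4 consecutive finite values exists)
#   [x, NaN, x, x, x, NaN]  rejected (4 finite values overall, but never 4 in a row)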
def normalization_01_lfq(df_stringency_mapfracstacked):
"""
The multiindex dataframe, that was subjected to stringency filtering, is 0-1 normalized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked, "LFQ intensity" and "MS/MS count" define a
single-level column index
self:
fractions: list of fractions e.g. ["01K", "03K", ...]
Returns:
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to
"normalized profile"; the columns "normalized profile" and "MS/MS count" are stored as single level indices; plotting is possible now
"""
df_01norm_mapstacked = df_stringency_mapfracstacked["LFQ intensity"].unstack("Fraction")
# 0-1 normalization of the LFQ intensities
df_01norm_unstacked = df_01norm_mapstacked.div(df_01norm_mapstacked.sum(axis=1), axis=0)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_01_stacked = df_rest.join(pd.DataFrame(df_01norm_unstacked.stack(
"Fraction"),columns=["LFQ intensity"]))
# rename columns: "LFQ intensity" into "normalized profile"
df_01_stacked.columns = [col if col!="LFQ intensity" else "normalized profile" for col in
df_01_stacked.columns]
#imputation
df_01_stacked = df_01_stacked.unstack("Fraction").replace(np.NaN, 0).stack("Fraction")
df_01_stacked = df_01_stacked.sort_index()
return df_01_stacked
def logarithmization_lfq(df_stringency_mapfracstacked):
"""The multiindex dataframe, that was subjected to stringency filtering, is logarithmized ("LFQ intensity").
Args:
df_stringency_mapfracstacked: dataframe, in which "Map" and "Fraction" is stacked; "LFQ intensity" and "MS/MS count" define a
single-level column index
Returns:
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized
"LFQ intensity"; the columns "log profile" and "MS/MS count" are stored as single level indices; PCA is possible now
"""
df_lognorm_ratio_stacked = df_stringency_mapfracstacked["LFQ intensity"].transform(np.log2)
df_rest = df_stringency_mapfracstacked.drop("LFQ intensity", axis=1)
df_log_stacked = df_rest.join(pd.DataFrame(df_lognorm_ratio_stacked, columns=["LFQ intensity"]))
# "LFQ intensity" will be renamed to "log profile"
df_log_stacked.columns = [col if col!="LFQ intensity" else "log profile" for col in df_log_stacked.columns]
return df_log_stacked
def split_ids_uniprot(el):
"""
This finds the primary canonical protein ID in the protein group. If no canonical ID is present it selects the first isoform ID.
"""
p1 = el.split(";")[0]
if "-" not in p1:
return p1
else:
p = p1.split("-")[0]
if p in el.split(";"):
return p
else:
return p1
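# Hedged examples (protein IDs are illustrative only):
#   split_ids_uniprot("P38646-2;P38646")   -> "P38646"    (canonical ID present in the group)
#   split_ids_uniprot("Q9Y6C9-2;Q9Y6C9-3") -> "Q9Y6C9-2"  (no canonical ID, first isoform kept)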
if self.acquisition == "SILAC - MQ":
# Index data
df_index = indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
# Run stringency filtering and normalization
df_stringency_mapfracstacked = stringency_silac(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_01_stacked = normalization_01_silac(df_stringency_mapfracstacked)
self.df_log_stacked = logarithmization_silac(df_stringency_mapfracstacked)
# format and reduce 0-1 normalized data for comparison with other experiments
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop(["Ratio H/L count", "Ratio H/L variability [%]"], inplace=True, axis=1)
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
# populate the analysis summary dictionary with (meta)data
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"Ratio H/L count" : self.RatioHLcount,
"Ratio variability" : self.RatioVariability,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
# TODO this line needs to be removed.
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
elif self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ" or self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
#if not summed_MS_counts:
# summed_MS_counts = self.summed_MS_counts
#if not consecutiveLFQi:
# consecutiveLFQi = self.consecutiveLFQi
if self.acquisition == "LFQ5 - MQ" or self.acquisition == "LFQ6 - MQ":
df_index = indexingdf()
elif self.acquisition == "LFQ5 - Spectronaut" or self.acquisition == "LFQ6 - Spectronaut":
df_index = spectronaut_LFQ_indexingdf()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_stringency_mapfracstacked = stringency_lfq(df_index)
self.df_stringencyFiltered = df_stringency_mapfracstacked
self.df_log_stacked = logarithmization_lfq(df_stringency_mapfracstacked)
self.df_01_stacked = normalization_01_lfq(df_stringency_mapfracstacked)
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"consecutive data points" : self.consecutiveLFQi,
"summed MS/MS counts" : self.summed_MSMS_counts,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#return self.df_01_stacked
elif self.acquisition == "Custom":
df_index = custom_indexing_and_normalization()
map_names = df_index.columns.get_level_values("Map").unique()
self.map_names = map_names
df_01_stacked = df_index.stack(["Map", "Fraction"])
df_01_stacked = df_01_stacked.reset_index().merge(self.df_organellarMarkerSet, how="left", on="Gene names")
df_01_stacked.set_index([c for c in df_01_stacked.columns if c not in ["normalized profile"]], inplace=True)
df_01_stacked.rename(index={np.nan:"undefined"}, level="Compartment", inplace=True)
self.df_01_stacked = df_01_stacked
df_01_comparison = self.df_01_stacked.copy()
comp_ids = pd.Series([split_ids_uniprot(el) for el in df_01_comparison.index.get_level_values("Protein IDs")], name="Protein IDs")
df_01_comparison.index = df_01_comparison.index.droplevel("Protein IDs")
df_01_comparison.set_index(comp_ids, append=True, inplace=True)
df_01_comparison.drop("MS/MS count", inplace=True, axis=1, errors="ignore")
df_01_comparison = df_01_comparison.unstack(["Map", "Fraction"])
df_01_comparison.columns = ["?".join(el) for el in df_01_comparison.columns.values]
df_01_comparison = df_01_comparison.copy().reset_index().drop(["C-Score", "Q-value", "Score", "Majority protein IDs", "Protein names", "id"], axis=1, errors="ignore")
self.analysis_summary_dict["0/1 normalized data"] = df_01_comparison.to_json()#double_precision=4) #.reset_index()
unique_proteins = [split_ids_uniprot(i) for i in set(self.df_01_stacked.index.get_level_values("Protein IDs"))]
unique_proteins.sort()
self.analysis_summary_dict["Unique Proteins"] = unique_proteins
self.analysis_summary_dict["changes in shape after filtering"] = shape_dict.copy()
analysis_parameters = {"acquisition" : self.acquisition,
"filename" : self.filename,
"comment" : self.comment,
"organism" : self.organism,
}
self.analysis_summary_dict["Analysis parameters"] = analysis_parameters.copy()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
else:
return "I do not know this"
def plot_log_data(self):
"""
Args:
self.df_log_stacked
Returns:
log_histogram: Histogram of log transformed data
"""
log_histogram = px.histogram(self.df_log_stacked.reset_index().sort_values(["Map", "Fraction"], key=natsort_list_keys),
x="log profile",
facet_col="Fraction",
facet_row="Map",
template="simple_white",
labels={"log profile": "log tranformed data ({})".format("LFQ intenisty" if self.acquisition != "SILAC - MQ" else "Ratio H/L")}
)
log_histogram.for_each_xaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.for_each_yaxis(lambda axis: axis.update(title={"text":""}))
log_histogram.add_annotation(x=0.5, y=0, yshift=-50, xref="paper",showarrow=False, yref="paper",
text="log2(LFQ intensity)")
log_histogram.add_annotation(x=0, y=0.5, textangle=270, xref="paper",showarrow=False, yref="paper", xshift=-50,
text="count")
log_histogram.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
return log_histogram
def quantity_profiles_proteinGroups(self):
"""
The number of profiles and protein groups per experiment, and the data completeness of profiles (total quantity, intersection) are calculated.
Args:
self:
acquisition: string, "LFQ6 - Spectronaut", "LFQ5 - Spectronaut", "LFQ5 - MQ", "LFQ6 - MQ", "SILAC - MQ"
df_index: multiindex dataframe, which contains 3 level labels: MAP, Fraction, Typ
df_01_stacked: df; 0-1 normalized data with "normalized profile" as column name
Returns:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; containing the following information:
npg_t: protein groups per experiment total quantity
npgf_t = groups with valid profiles per experiment total quantity
npr_t: profiles with any valid values
nprf_t = total number of valid profiles
npg_i: protein groups per experiment intersection
npgf_i = groups with valid profiles per experiment intersection
npr_i: profiles with any valid values in the intersection
nprf_i = total number of valid profiles in the intersection
npr_t_dc: profiles, % values != nan
nprf_t_dc = profiles, total, filtered, % values != nan
npr_i_dc: profiles, intersection, % values != nan
nprf_i_dc = profiles, intersection, filtered, % values != nan
df_npg | df_npgf: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f = protein groups, per fraction
or npgf_f = protein groups, filtered, per fraction
df_npg_dc | df_npgf_dc: index: maps e.g. "Map1", "Map2",..., columns: fractions e.g. "03K", "06K", ...
npg_f_dc = protein groups, per fraction, % values != nan
or npgf_f_dc = protein groups, filtered, per fraction, % values != nan
"""
if self.acquisition == "SILAC - MQ":
df_index = self.df_index["Ratio H/L"]
df_01_stacked = self.df_01_stacked["normalized profile"]
elif self.acquisition.startswith("LFQ"):
df_index = self.df_index["LFQ intensity"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
elif self.acquisition == "Custom":
df_index = self.df_index["normalized profile"]
df_01_stacked = self.df_01_stacked["normalized profile"].replace(0, np.nan)
#unfiltered
npg_t = df_index.shape[0]
df_index_MapStacked = df_index.stack("Map")
npr_t = df_index_MapStacked.shape[0]/len(self.map_names)
npr_t_dc = 1-df_index_MapStacked.isna().sum().sum()/np.prod(df_index_MapStacked.shape)
#filtered
npgf_t = df_01_stacked.unstack(["Map", "Fraction"]).shape[0]
df_01_MapStacked = df_01_stacked.unstack("Fraction")
nprf_t = df_01_MapStacked.shape[0]/len(self.map_names)
nprf_t_dc = 1-df_01_MapStacked.isna().sum().sum()/np.prod(df_01_MapStacked.shape)
#unfiltered intersection
try:
df_index_intersection = df_index_MapStacked.groupby(level="Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_index_intersection = df_index_MapStacked.groupby(level="Protein IDs").filter(lambda x : len(x)==len(self.map_names))
npr_i = df_index_intersection.shape[0]/len(self.map_names)
npr_i_dc = 1-df_index_intersection.isna().sum().sum()/np.prod(df_index_intersection.shape)
npg_i = df_index_intersection.unstack("Map").shape[0]
#filtered intersection
try:
df_01_intersection = df_01_MapStacked.groupby(level = "Sequence").filter(lambda x : len(x)==len(self.map_names))
except:
df_01_intersection = df_01_MapStacked.groupby(level = "Protein IDs").filter(lambda x : len(x)==len(self.map_names))
nprf_i = df_01_intersection.shape[0]/len(self.map_names)
nprf_i_dc = 1-df_01_intersection.isna().sum().sum()/np.prod(df_01_intersection.shape)
npgf_i = df_01_intersection.unstack("Map").shape[0]
# summarize in dataframe and save to attribute
df_quantity_pr_pg = pd.DataFrame(
{
"filtering": pd.Series(["before filtering", "before filtering", "after filtering", "after filtering"], dtype=np.dtype("O")),
"type": pd.Series(["total", "intersection", "total", "intersection"], dtype=np.dtype("O")),
"number of protein groups": pd.Series([npg_t, npg_i, npgf_t, npgf_i], dtype=np.dtype("float")),
"number of profiles": pd.Series([npr_t, npr_i, nprf_t, nprf_i], dtype=np.dtype("float")),
"data completeness of profiles": pd.Series([npr_t_dc, npr_i_dc, nprf_t_dc, nprf_i_dc], dtype=np.dtype("float"))})
self.df_quantity_pr_pg = df_quantity_pr_pg.reset_index()
self.analysis_summary_dict["quantity: profiles/protein groups"] = self.df_quantity_pr_pg.to_json()
#additional depth assessment per fraction
dict_npgf = {}
dict_npg = {}
list_npg_dc = []
list_npgf_dc = []
for df_intersection in [df_index_intersection, df_01_intersection]:
for fraction in self.fractions:
df_intersection_frac = df_intersection[fraction]
npgF_f_dc = 1-df_intersection_frac.isna().sum()/len(df_intersection_frac)
npgF_f = df_intersection_frac.unstack("Map").isnull().sum(axis=1).value_counts()
if fraction not in dict_npg.keys():
dict_npg[fraction] = npgF_f
list_npg_dc.append(npgF_f_dc)
else:
dict_npgf[fraction] = npgF_f
list_npgf_dc.append(npgF_f_dc)
df_npg = pd.DataFrame(dict_npg)
df_npg.index.name = "Protein Groups present in:"
df_npg.rename_axis("Fraction", axis=1, inplace=True)
df_npg = df_npg.stack("Fraction").reset_index()
df_npg = df_npg.rename({0: "Protein Groups"}, axis=1)
df_npg.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
df_npgf = pd.DataFrame(dict_npgf)
df_npgf.index.name = "Protein Groups present in:"
df_npgf.rename_axis("Fraction", axis=1, inplace=True)
df_npgf = df_npgf.stack("Fraction").reset_index()
df_npgf = df_npgf.rename({0: "Protein Groups"}, axis=1)
df_npgf.sort_values(["Fraction", "Protein Groups present in:"], inplace=True, key=natsort_list_keys)
max_df_npg = df_npg["Protein Groups present in:"].max()
min_df_npg = df_npg["Protein Groups present in:"].min()
rename_numOFnans = {}
for x, y in zip(range(max_df_npg,min_df_npg-1, -1), range(max_df_npg+1)):
if y == 1:
rename_numOFnans[x] = "{} Map".format(y)
elif y == 0:
rename_numOFnans[x] = "PG not identified".format(y)
else:
rename_numOFnans[x] = "{} Maps".format(y)
for keys in rename_numOFnans.keys():
df_npg.loc[df_npg["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
df_npgf.loc[df_npgf["Protein Groups present in:"] ==keys, "Protein Groups present in:"] = rename_numOFnans[keys]
# summarize in dataframe and save to attributes
self.df_npg_dc = pd.DataFrame(
{
"Fraction" : pd.Series(self.fractions),
"Data completeness before filtering": pd.Series(list_npg_dc),
"Data completeness after filtering": pd.Series(list_npgf_dc),
})
self.df_npg = df_npg
self.df_npgf = df_npgf
def plot_quantity_profiles_proteinGroups(self):
"""
Args:
self:
df_quantity_pr_pg: df; no index, columns: "filtering", "type", "npg", "npr", "npr_dc"; further information: see above
Returns:
"""
df_quantity_pr_pg = self.df_quantity_pr_pg
layout = go.Layout(barmode="overlay",
xaxis_tickangle=90,
autosize=False,
width=300,
height=500,
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
#title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
mirror=True),
template="simple_white")
fig_npg = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npg.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of protein groups"],
name=t))
fig_npg.update_layout(layout, title="Number of Protein Groups", yaxis=go.layout.YAxis(title="Protein Groups"))
fig_npr = go.Figure()
for t in df_quantity_pr_pg["type"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["type"] == t]
fig_npr.add_trace(go.Bar(
x=plot_df["filtering"],
y=plot_df["number of profiles"],
name=t))
fig_npr.update_layout(layout, title="Number of Profiles")
df_quantity_pr_pg = df_quantity_pr_pg.sort_values("filtering")
fig_npr_dc = go.Figure()
for t in df_quantity_pr_pg["filtering"].unique():
plot_df = df_quantity_pr_pg[df_quantity_pr_pg["filtering"] == t]
fig_npr_dc.add_trace(go.Bar(
x=plot_df["type"],
y=plot_df["data completeness of profiles"],
name=t))
fig_npr_dc.update_layout(layout, title="Coverage", yaxis=go.layout.YAxis(title="Data completness"))
#fig_npr_dc.update_xaxes(tickangle=30)
fig_npg_F = px.bar(self.df_npg,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - before filtering",
width=500)
fig_npgf_F = px.bar(self.df_npgf,
x="Fraction",
y="Protein Groups",
color="Protein Groups present in:",
template="simple_white",
title = "Protein groups per fraction - after filtering",
width=500)
fig_npg_F_dc = go.Figure()
for data_type in ["Data completeness after filtering", "Data completeness before filtering"]:
fig_npg_F_dc.add_trace(go.Bar(
x=self.df_npg_dc["Fraction"],
y=self.df_npg_dc[data_type],
name=data_type))
fig_npg_F_dc.update_layout(layout, barmode="overlay", title="Data completeness per fraction", yaxis=go.layout.YAxis(title=""), height=450, width=600)
return fig_npg, fig_npr, fig_npr_dc, fig_npg_F, fig_npgf_F, fig_npg_F_dc
def perform_pca(self):
"""
PCA will be performed, using logarithmized data.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
"V-type proton ATP
df_log_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "log profile" originates from logarithmized "LFQ intensity"
and "Ratio H/L", respectively; additionally the columns "MS/MS count" and "Ratio H/L count|Ratio H/L variability [%]" are stored
as single level indices
df_01_stacked: dataframe, in which "MAP" and "Fraction" are stacked; data in the column "LFQ intensity" is 0-1 normalized and renamed to "normalized
profile"; the columns "normalized profile"" and "MS/MS count" are stored as single level indices; plotting is possible now
Returns:
self:
df_pca: df, PCA was performed, while keeping the information of the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Map" "Compartment"
df_pca_combined: df, PCA was performed across the Maps
columns: "PC1", "PC2", "PC3"
index: "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "Q-value", "Score", "id", "Compartment"
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are consistent
throughout all maps / coverage filtering.
"""
markerproteins = self.markerproteins
if self.acquisition == "SILAC - MQ":
df_01orlog_fracunstacked = self.df_log_stacked["log profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_log_stacked["log profile"].unstack(["Fraction", "Map"]).dropna()
elif self.acquisition.startswith("LFQ") or self.acquisition == "Custom":
df_01orlog_fracunstacked = self.df_01_stacked["normalized profile"].unstack("Fraction").dropna()
df_01orlog_MapFracUnstacked = self.df_01_stacked["normalized profile"].unstack(["Fraction", "Map"]).dropna()
pca = PCA(n_components=3)
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca = pd.DataFrame(pca.fit_transform(df_01orlog_fracunstacked))
df_pca.columns = ["PC1", "PC2", "PC3"]
df_pca.index = df_01orlog_fracunstacked.index
self.df_pca = df_pca.sort_index(level=["Gene names", "Compartment"])
# df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3"
df_pca_combined = pd.DataFrame(pca.fit_transform(df_01orlog_MapFracUnstacked))
df_pca_combined.columns = ["PC1", "PC2", "PC3"]
df_pca_combined.index = df_01orlog_MapFracUnstacked.index
self.df_pca_combined = df_pca_combined.sort_index(level=["Gene names", "Compartment"])
map_names = self.map_names
df_pca_all_marker_cluster_maps = pd.DataFrame()
df_pca_filtered = df_pca.unstack("Map").dropna()
for clusters in markerproteins:
for marker in markerproteins[clusters]:
try:
plot_try_pca = df_pca_filtered.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.append(
plot_try_pca)
if len(df_pca_all_marker_cluster_maps) == 0:
df_pca_all_marker_cluster_maps = df_pca_filtered.stack("Map")
else:
df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.stack("Map")
self.df_pca_all_marker_cluster_maps = df_pca_all_marker_cluster_maps.sort_index(level=["Gene names", "Compartment"])
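# Note (sketch): the explained_variance_ratio_ attribute of the fitted sklearn PCA object could be
# reported here to show how much variance PC1-PC3 capture; the current implementation does not store it.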
def plot_global_pca(self, map_of_interest="Map1", cluster_of_interest="Proteasome", x_PCA="PC1", y_PCA="PC3", collapse_maps=False):
""""
PCA plot will be generated
Args:
self:
df_organellarMarkerSet: df, columns: "Gene names", "Compartment", no index
df_pca: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3",
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Map", "Compartment",
Returns:
pca_figure: global PCA plot
"""
if collapse_maps == False:
df_global_pca = self.df_pca.unstack("Map").swaplevel(0,1, axis=1)[map_of_interest].reset_index()
else:
df_global_pca = self.df_pca_combined.reset_index()
for i in self.markerproteins[cluster_of_interest]:
df_global_pca.loc[df_global_pca["Gene names"] == i, "Compartment"] = "Selection"
compartments = self.df_organellarMarkerSet["Compartment"].unique()
compartment_color = dict(zip(compartments, self.css_color))
compartment_color["Selection"] = "black"
compartment_color["undefined"] = "lightgrey"
fig_global_pca = px.scatter(data_frame=df_global_pca,
x=x_PCA,
y=y_PCA,
color="Compartment",
color_discrete_map=compartment_color,
title= "Protein subcellular localization by PCA for {}".format(map_of_interest)
if collapse_maps == False else "Protein subcellular localization by PCA of combined maps",
hover_data=["Protein IDs", "Gene names", "Compartment"],
template="simple_white",
opacity=0.9
)
return fig_global_pca
def plot_cluster_pca(self, cluster_of_interest="Proteasome"):
"""
PCA plot will be generated
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...}
df_pca_all_marker_cluster_maps: PCA processed dataframe, containing the columns "PC1", "PC2", "PC3", filtered for marker genes, that are
consistent throughout all maps / coverage filtering.
Returns:
pca_figure: PCA plot, for one protein cluster all maps are plotted
"""
df_pca_all_marker_cluster_maps = self.df_pca_all_marker_cluster_maps
map_names = self.map_names
markerproteins = self.markerproteins
try:
for maps in map_names:
df_setofproteins_PCA = pd.DataFrame()
for marker in markerproteins[cluster_of_interest]:
try:
plot_try_pca = df_pca_all_marker_cluster_maps.xs((marker, maps), level=["Gene names", "Map"],
drop_level=False)
except KeyError:
continue
df_setofproteins_PCA = df_setofproteins_PCA.append(plot_try_pca)
df_setofproteins_PCA.reset_index(inplace=True)
if maps == map_names[0]:
pca_figure = go.Figure(
data=[go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
)])
else:
pca_figure.add_trace(go.Scatter3d(x=df_setofproteins_PCA.PC1,
y=df_setofproteins_PCA.PC2,
z=df_setofproteins_PCA.PC3,
hovertext=df_setofproteins_PCA["Gene names"],
mode="markers",
name=maps
))
pca_figure.update_layout(autosize=False, width=500, height=500,
title="PCA plot for <br>the protein cluster: {}".format(cluster_of_interest),
template="simple_white")
return pca_figure
except:
return "This protein cluster was not quantified"
def calc_biological_precision(self):
"""
This function calculates the biological precision of all quantified protein clusters. It provides access to the data slice for all marker proteins, the distance profiles and the aggregated distances. It repeatedly applies the methods get_marker_proteins_unfiltered and calc_cluster_distances.
TODO: integrate optional arguments for calc_cluster_distances: complex_profile, distance_measure.
TODO: replace compatibility attributes with function return values and adjust attribute usage in downstream plotting functions.
Args:
self attributes:
markerproteins: dict, contains marker protein assignments
df_01_stacked: df, contains 0-1 normalized data, required for execution of get_marker_proteins_unfiltered
Returns:
df_alldistances_individual_mapfracunstacked: df, distance profiles, fully unstacked
df_alldistances_aggregated_mapunstacked: df, profile distances (manhattan distance by default), fully unstacked
df_allclusters_01_unfiltered_mapfracunstacked: df, collected marker protein data
self attributes:
df_distance_noindex: compatibility version of df_alldistances_aggregated_mapunstacked
df_allclusters_01_unfiltered_mapfracunstacked
df_allclusters_clusterdist_fracunstacked_unfiltered: compatibility version of df_allclusters_01_unfiltered_mapfracunstacked (only used by quantification_overview)
df_allclusters_clusterdist_fracunstacked: compatibility version of df_alldistances_individual_mapfracunstacked
genenames_sortedout_list = list of gene names with incomplete coverage
analysis_summary_dict entries:
"Manhattan distances" = df_distance_noindex
"Distances to the median profile": df_allclusters_clusterdist_fracunstacked, sorted and melted
"""
df_alldistances_individual_mapfracunstacked = pd.DataFrame()
df_alldistances_aggregated_mapunstacked = pd.DataFrame()
df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame()
for cluster in self.markerproteins.keys():
# collect data irrespective of coverage
df_cluster_unfiltered = self.get_marker_proteins_unfiltered(cluster)
df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked.append(df_cluster_unfiltered)
# filter for coverage and calculate distances
df_cluster = df_cluster_unfiltered.dropna()
if len(df_cluster) == 0:
continue
df_distances_aggregated, df_distances_individual = self.calc_cluster_distances(df_cluster)
df_alldistances_individual_mapfracunstacked = df_alldistances_individual_mapfracunstacked.append(df_distances_individual)
df_alldistances_aggregated_mapunstacked = df_alldistances_aggregated_mapunstacked.append(df_distances_aggregated)
if len(df_alldistances_individual_mapfracunstacked) == 0:
self.df_distance_noindex = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_01_unfiltered_mapfracunstacked = pd.DataFrame(columns = ["Gene names", "Map", "Cluster", "distance"])
self.df_allclusters_clusterdist_fracunstacked_unfiltered = pd.DataFrame(columns = ["Fraction"])
self.df_allclusters_clusterdist_fracunstacked = pd.DataFrame(columns = ["Fraction"])
self.genenames_sortedout_list = "No clusters found"
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
else:
df_alldistances_aggregated_mapunstacked.columns.name = "Map"
## Get compatibility with plotting functions, by mimicking assignment of old functions:
# old output of distance_calculation
self.df_distance_noindex = df_alldistances_aggregated_mapunstacked.stack("Map").reset_index().rename({0: "distance"}, axis=1)
self.analysis_summary_dict["Manhattan distances"] = self.df_distance_noindex.to_json()
# old output of multiple_iterations
# self.df_allclusters_clusterdist_fracunstacked_unfiltered --> this won't exist anymore, replaced by:
self.df_allclusters_01_unfiltered_mapfracunstacked = df_allclusters_01_unfiltered_mapfracunstacked
# kept for testing of quantification table:
self.df_allclusters_clusterdist_fracunstacked_unfiltered = df_allclusters_01_unfiltered_mapfracunstacked.stack("Map")
# same as before, but now already abs
self.df_allclusters_clusterdist_fracunstacked = df_alldistances_individual_mapfracunstacked.stack("Map")
df_dist_to_median = self.df_allclusters_clusterdist_fracunstacked.stack("Fraction")
df_dist_to_median.name = "distance"
df_dist_to_median = df_dist_to_median.reindex(index=natsort.natsorted(df_dist_to_median.index))
self.analysis_summary_dict["Distances to the median profile"] = df_dist_to_median.reset_index().to_json()
self.genenames_sortedout_list = [el for el in df_allclusters_01_unfiltered_mapfracunstacked.index.get_level_values("Gene names")
if el not in df_alldistances_individual_mapfracunstacked.index.get_level_values("Gene names")]
return df_alldistances_individual_mapfracunstacked, df_alldistances_aggregated_mapunstacked, df_allclusters_01_unfiltered_mapfracunstacked
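# Hypothetical usage sketch (not part of the original module). `ds` is an assumed, fully
# configured instance with markerproteins and df_01_stacked populated; the method name is
# inferred from this docstring:
#     individual, aggregated, unfiltered = ds.calc_biological_precision()
#     per_map_median = aggregated.stack("Map").groupby("Map").median()   # summarise distances per map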
def get_marker_proteins_unfiltered(self, cluster):
"""
This function retrieves the 0-1 normalized data for any given protein cluster, unfiltered for coverage.
Args:
cluster: str, cluster name, should be one of self.markerproteins.keys()
self attributes:
df_01_stacked: df, contains the fully stacked 0-1 normalized data
markerproteins: dict, contains marker protein assignments
Returns:
df_cluster_unfiltered: df, unfiltered data for the selected cluster, maps and fractions are unstacked.
self attributes:
None
"""
df_in = self.df_01_stacked["normalized profile"].unstack("Fraction")
markers = self.markerproteins[cluster]
# retrieve marker proteins
df_cluster_unfiltered = pd.DataFrame()
for marker in markers:
try:
df_p = df_in.xs(marker, level="Gene names", axis=0, drop_level=False)
except KeyError:  # marker protein not present in the dataset
continue
df_cluster_unfiltered = df_cluster_unfiltered.append(df_p)
if len(df_cluster_unfiltered) == 0:
return df_cluster_unfiltered
# Unstack maps and add Cluster to index
df_cluster_unfiltered = df_cluster_unfiltered.unstack("Map")
df_cluster_unfiltered.set_index(pd.Index(np.repeat(cluster, len(df_cluster_unfiltered)), name="Cluster"), append=True, inplace=True)
return df_cluster_unfiltered
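# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance):
#     df_prot = ds.get_marker_proteins_unfiltered("Proteasome")   # maps and fractions unstacked, NaNs kept
#     df_complete = df_prot.dropna()                              # keep only proteins quantified in all maps/fractions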
def calc_cluster_distances(self, df_cluster, complex_profile=np.median, distance_measure="manhattan"):
"""
Calculates the absolute differences in each fraction and the profile distances relative to the center of a cluster.
Per default this is the manhattan distance to the median profile.
Args:
df_cluster: df, 0-1 normalized profiles of cluster members, should already be filtered for full coverage and be in full wide format.
complex_profile: fun, function provided to apply for calculating the reference profile, default: np.median.
distance_measure: str, selected distance measure to calculate. Currently only 'manhattan' is supported, everything else raises a ValueError.
self attributes:
None
Returns:
df_distances_aggregated: df, proteins x maps; when stacked, the distance column is currently named 0 but contains the manhattan distances.
df_distances_individual: df, same shape as df_cluster, but now with absolute differences to the reference.
self attributes:
None
"""
df_distances_aggregated = pd.DataFrame()
ref_profile = pd.DataFrame(df_cluster.apply(complex_profile, axis=0, result_type="expand")).T
df_distances_individual = df_cluster.apply(lambda x: np.abs(x-ref_profile.iloc[0,:]), axis=1)
# loop over maps
maps = set(df_cluster.columns.get_level_values("Map"))
for m in maps:
if distance_measure == "manhattan":
d_m = pw.manhattan_distances(df_cluster.xs(m, level="Map", axis=1), ref_profile.xs(m, level="Map", axis=1))
else:
raise ValueError(distance_measure)
d_m = pd.DataFrame(d_m, columns=[m], index=df_cluster.index)
df_distances_aggregated = pd.concat([df_distances_aggregated, d_m], axis=1)
df_distances_aggregated.columns.set_names(names="Map", inplace=True)
return df_distances_aggregated, df_distances_individual
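# Minimal standalone sketch of the underlying technique (median reference profile and manhattan
# distances) on toy data; illustrative only, not part of the original module:
#     import numpy as np
#     from sklearn.metrics.pairwise import manhattan_distances
#     profiles = np.array([[0.1, 0.4, 0.5], [0.2, 0.3, 0.5], [0.0, 0.5, 0.5]])   # 0-1 normalized profiles
#     ref = np.median(profiles, axis=0, keepdims=True)     # reference (median) profile
#     abs_diff = np.abs(profiles - ref)                    # per-fraction deviations (df_distances_individual)
#     dists = manhattan_distances(profiles, ref)           # one aggregated distance per protein (df_distances_aggregated)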
def profiles_plot(self, map_of_interest="Map1", cluster_of_interest="Proteasome"):
"""
The function allows the plotting of filtered and normalized spatial proteomic data using plotly.express.
The median profile is also calculated based on the overlapping proteins. Profiles of proteins that are not quantified in all maps are dashed.
Args:
map_of_interest: str, must be in self.map_names
cluster_of_interest: str, must be in self.markerproteins.keys()
self attributes:
df_allclusters_01_unfiltered_mapfracunstacked: df, contains 0-1 normalized profiles for all markerproteins detected in any map
Returns:
abundance_profiles_and_median_figure: plotly line plot, displaying the relative abundance profiles.
"""
try:
df_setofproteins = self.df_allclusters_01_unfiltered_mapfracunstacked.xs(cluster_of_interest, level="Cluster", axis=0)
df_setofproteins_median = df_setofproteins.dropna().xs(map_of_interest, level="Map", axis=1).median(axis=0)
# fractions get sorted
df_setofproteins = df_setofproteins.xs(map_of_interest, level="Map", axis=1).stack("Fraction")
df_setofproteins = df_setofproteins.reindex(index=natsort.natsorted(df_setofproteins.index))
df_setofproteins.name = "normalized profile"
# make it available for plotting (index reset for px.line)
df_setofproteins = df_setofproteins.reset_index()
abundance_profiles_figure = px.line(df_setofproteins,
x="Fraction",
y="normalized profile",
color="Gene names",
line_group="Sequence" if "Sequence" in df_setofproteins.columns else "Gene names",
template="simple_white",
title="Relative abundance profile for {} of <br>the protein cluster: {}".format(map_of_interest, cluster_of_interest)
)
df_setofproteins_median.name = "normalized profile"
#fractions get sorted
df_setofproteins_median = df_setofproteins_median.reindex(index=natsort.natsorted(df_setofproteins_median.index))
# make it available for plotting
df_setofproteins_median = df_setofproteins_median.reset_index()
df_setofproteins_median.insert(0, "Gene names", np.repeat("Median profile", len(df_setofproteins_median)))
abundance_profiles_and_median_figure = abundance_profiles_figure.add_scatter(x=df_setofproteins_median["Fraction"],
y=df_setofproteins_median["normalized profile"],
name="Median profile"
)
# dash lines for proteins that have insufficient coverage across maps
abundance_profiles_and_median_figure.for_each_trace(lambda x: x.update(line={"dash":"dash"}),
selector=lambda x: x.name in self.genenames_sortedout_list)
return abundance_profiles_and_median_figure
except:
return "This protein cluster was not quantified"
def quantification_overview(self, cluster_of_interest="Proteasome"):
"""
Args:
self.df_allclusters_clusterdist_fracunstacked_unfiltered
columns: fraction names (e.g. 01K, 03K, 06K, 12K, 24K, 80K)
index: Gene names, Protein IDs, C-Score, Q-value, Map, Compartment, Cluster
Returns:
df_quantification_overview: df, cluster members x maps, marking quantified values with "x" and missing values with "-"
"""
df_quantification_overview = self.df_allclusters_clusterdist_fracunstacked_unfiltered.xs(cluster_of_interest, level="Cluster", axis=0)\
[self.fractions[0]].unstack("Map")
if "Sequence" in df_quantification_overview.index.names:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i in ["Sequence","Gene names"]])
else:
df_quantification_overview = df_quantification_overview.droplevel([i for i in df_quantification_overview.index.names if not i=="Gene names"])
df_quantification_overview = df_quantification_overview.notnull().replace({True: "x", False: "-"})
return df_quantification_overview
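# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance):
#     ds.quantification_overview("Proteasome")   # "x" = quantified in that map, "-" = missing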
def distance_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for one selected cluster is generated across all maps, displaying the distribution of the
distances (e.g. Manhattan distance).
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset.
It contains the column "distance", in which the distances (e.g. Manhattan distance) for each individual protein
of the specified clusters (see self.markerproteins) are stored
map_names: list of individual map names (self.map_names)
Returns:
distance_boxplot_figure: boxplot. Along the x-axis the maps, along the y-axis the distances are shown
"""
map_names = self.map_names
df_distance_noindex = self.df_distance_noindex
# "Gene names", "Map", "Cluster" and transferred into the index
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
if "Sequence" in df_distance_map_cluster_gene_in_index.columns:
df_distance_map_cluster_gene_in_index.set_index("Sequence", append=True, inplace=True)
df_cluster_xmaps_distance_with_index = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_distance_map_cluster_gene_in_index" and appended to the new dataframe df_cluster_xmaps_distance_with_index
for maps in map_names:
plot_try = df_distance_map_cluster_gene_in_index.xs((cluster_of_interest, maps),
level=["Cluster", "Map"], drop_level=False)
df_cluster_xmaps_distance_with_index = df_cluster_xmaps_distance_with_index.append(plot_try)
df_cluster_xmaps_distance_with_index["Combined Maps"] = "Combined Maps"
#number of proteins within one cluster
self.proteins_quantified_across_all_maps = df_cluster_xmaps_distance_with_index.unstack("Map").shape[0]
# index will be reset, required by px.box
df_cluster_xmaps_distance = df_cluster_xmaps_distance_with_index.reset_index()
distance_boxplot_figure = go.Figure()
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Map"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.add_trace(go.Box(
x=df_cluster_xmaps_distance["Combined Maps"],
y=df_cluster_xmaps_distance["distance"],
boxpoints="all",
whiskerwidth=0.2,
marker_size=2,
hovertext=df_cluster_xmaps_distance["Gene names"]
))
distance_boxplot_figure.update_layout(
title="Manhattan distance distribution for <br>the protein cluster: {}".format(cluster_of_interest),
autosize=False,
showlegend=False,
width=500,
height=500,
# black box around the graph
xaxis=go.layout.XAxis(linecolor="black",
linewidth=1,
title="Map",
mirror=True),
yaxis=go.layout.YAxis(linecolor="black",
linewidth=1,
title="distance",
mirror=True),
template="simple_white"
)
return distance_boxplot_figure
except:
self.cache_cluster_quantified = False
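# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance):
#     fig = ds.distance_boxplot("Proteasome")
#     if fig is not None:    # None is returned (and cache_cluster_quantified set to False) when the cluster was not quantified
#         fig.show()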
def distance_to_median_boxplot(self, cluster_of_interest="Proteasome"):
"""
A box plot for 1 desired cluster, across all maps and fractions is generated displaying the
distribution of the distance to the median. For each fraction, one box plot will be displayed.
Args:
self:
df_allclusters_clusterdist_fracunstacked, dataframe with single level column, stored as attribute
(self.df_allclusters_clusterdist_fracunstacked), in which "Fraction" is unstacked. It contains only the
normalized data of individual protein clusters subtracted by the median of the respective protein cluster
for each fraction.
map_names: list of individual map names (self.map_names)
Returns:
distance_to_median_boxplot_figure: Box plot. Along the x-axis, the maps are shown, along the y-axis
the distances are plotted
"""
df_boxplot_manymaps = pd.DataFrame()
try:
# for each individual map and a defined cluster data will be extracted from the dataframe
# "df_allclusters_clusterdist_fracunstacked" and appended to the new dataframe df_boxplot_manymaps
for maps in self.map_names:
plot_try = self.df_allclusters_clusterdist_fracunstacked.xs((cluster_of_interest, maps), level=["Cluster", "Map"], drop_level=False)
df_boxplot_manymaps = df_boxplot_manymaps.append(plot_try)
self.df_boxplot_manymaps = df_boxplot_manymaps
# index will be reset, required by px.box
df_boxplot_manymaps = abs(df_boxplot_manymaps.stack("Fraction"))
df_boxplot_manymaps.name = "distance"
df_boxplot_manymaps = df_boxplot_manymaps.reindex(index=natsort.natsorted(df_boxplot_manymaps.index))
df_boxplot_manymaps = df_boxplot_manymaps.reset_index()
# box plot will be generated, every fraction will be displayed in a single plot
distance_to_median_boxplot_figure = px.box(df_boxplot_manymaps,
x="Map",
y="distance",
facet_col="Fraction",
facet_col_wrap=2,
boxmode="overlay", height=900, width=700, points="all",
hover_name="Gene names",
template="simple_white",
title="Distribution of the distance to the median for <br>the protein cluster: {}".format(cluster_of_interest))
return distance_to_median_boxplot_figure
except:
return "This protein cluster was not quantified"
def dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is calculated.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
df_01_stacked: "Map" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
None. df_dynamicRange (no index, columns: "Max", "Min", "Dynamic Range", "Cluster") is serialized to JSON and
stored in self.analysis_summary_dict["Dynamic Range"] for later plotting via plot_dynamic_range.
"""
df_setofproteins_allMaps = pd.DataFrame()
df_dynamicRange = pd.DataFrame()
df_01_stacked = self.df_01_stacked
for clusters in self.markerproteins:
try:
df_setofproteins_allMaps = pd.DataFrame()
for marker in self.markerproteins[clusters]:
try:
df_marker_allMaps = df_01_stacked.xs(marker, level="Gene names", drop_level=False)
except KeyError:
continue
df_setofproteins_allMaps = df_setofproteins_allMaps.append(df_marker_allMaps)
df_setofproteins_allMaps_median = df_setofproteins_allMaps["normalized profile"].unstack("Fraction").median()
df_dynamicRange = df_dynamicRange.append(pd.DataFrame(np.array([[max(df_setofproteins_allMaps_median),
min(df_setofproteins_allMaps_median),
max(df_setofproteins_allMaps_median)-min(df_setofproteins_allMaps_median),
clusters]]),
columns=["Max", "Min", "Dynamic Range", "Cluster"]),
ignore_index=True)
except:
continue
self.analysis_summary_dict["Dynamic Range"] = df_dynamicRange.to_json()
def plot_dynamic_range(self):
"""
Dynamic range of each individual protein cluster (of the median profile) across all maps is displayed.
Args:
self:
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
df_01_stacked: "Map" and "Fraction" are stacked; the data in the column "normalized profile" is used for plotting. Additionally the columns
"MS/MS count" and "Ratio H/L count | Ratio H/L variability [%] | Ratio H/L" are found in LFQ and SILAC data respectively
Returns:
fig_dynamicRange: bar plot, displaying the dynamic range (from "Min" to "Max" of the median profile) for each
protein cluster, based on the data stored in self.analysis_summary_dict["Dynamic Range"].
"""
fig_dynamicRange = px.bar(pd.read_json(self.analysis_summary_dict["Dynamic Range"]),
x="Cluster",
y="Dynamic Range",
base="Min",
template="simple_white",
width=1000,
height=500).update_xaxes(categoryorder="total ascending")
return fig_dynamicRange
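# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance):
#     ds.dynamic_range()                 # populates analysis_summary_dict["Dynamic Range"]
#     fig = ds.plot_dynamic_range()      # one bar per cluster, spanning Min to Max of the median profile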
def results_overview_table(self):
"""
A dataframe is created that provides the statistics "range", "median" and "standardeviation" as columns,
based on the data given in df_distance_noindex, per cluster and map (plus a "combined maps" row per cluster).
Args:
self:
df_distance_noindex: stored as attribute (self.df_distance_noindex), index is reset. It contains the column "distance",
in which the distances (e.g. Manhattan distance) for each individual protein of the specified clusters (see self.markerproteins)
are stored
markerproteins: dictionary, key: cluster name, value: gene names (e.g. {"Proteasome" : ["PSMA1", "PSMA2",...], ...})
"""
df_distance_noindex = self.df_distance_noindex
df_distance_map_cluster_gene_in_index = df_distance_noindex.set_index(["Gene names", "Map", "Cluster"])
map_names = self.map_names
df_overview = pd.DataFrame()
for clusters in self.markerproteins:
#if a certain cluster is not available in the dataset at all
try:
for maps in map_names:
df_dist_map_cluster = df_distance_map_cluster_gene_in_index.xs((clusters, maps), level=["Cluster", "Map"], drop_level=False)
statistic_table = {"range": (df_dist_map_cluster["distance"].max(axis=0)) - (df_dist_map_cluster["distance"].min(axis=0)),
"median": df_dist_map_cluster["distance"].median(axis=0),
"standardeviation": df_dist_map_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": maps
}
statistic_series = pd.Series(data=statistic_table)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
df_dist_cluster = df_distance_map_cluster_gene_in_index.xs(clusters, level="Cluster")
statistic_table_combined = {
"range": (df_dist_cluster["distance"].max(axis=0)) - (df_dist_cluster["distance"].min(axis=0)),
"median": df_dist_cluster["distance"].median(axis=0),
"standardeviation": df_dist_cluster["distance"].std(axis=0),
"Cluster": clusters,
"Map": "combined maps"
}
statistic_series_combined = pd.Series(data=statistic_table_combined)
df_statistic_table_individual_cluster = pd.DataFrame(statistic_series_combined).T
df_overview = df_overview.append(df_statistic_table_individual_cluster)
except:
continue
try:
df_overview.set_index(["Cluster", "Map"], inplace=True)
df_overview.sort_index(axis=0, level=0, inplace=True)
except:
df_overview = pd.DataFrame()
self.analysis_summary_dict["Overview table"] = df_overview.reset_index().to_json()
self.analysed_datasets_dict[self.expname] = self.analysis_summary_dict.copy()
#self.analysis_summary_dict.clear()
return df_overview
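# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance):
#     overview = ds.results_overview_table()            # range / median / standardeviation per cluster and map
#     overview.xs("Proteasome", level="Cluster")        # statistics for a single cluster, incl. "combined maps"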
def reframe_df_01ORlog_for_Perseus(self, df_01ORlog):
""""
To be available for Perseus df_01_stacked needs to be reframed.
Args:
df_01ORlog: df, 0-1 normalized (df_01_stacked) or log-transformed (df_log_stacked) data;
"Map" and "Fraction" are levels of the index and are combined into a single "Map_Frac" level before unstacking.
Returns:
df_01ORlog_svm:
LFQ:
columns: "MS/MS count_Map1_01K", "normalized profile_Map1_01K"
index: "Gene names", "Protein IDs", "C-Score", "Q-value", "Compartment"
SILAC:
columns: e.g. "Ratio H/L count_MAP2_80K", "Ratio H/L variability [%]_MAP1_03K", "normalized profile_MAP5_03K"
index: "Q-value", "Score", "Protein IDs", "Majority protein IDs", "Protein names", "Gene names", "id", "Compartment"
"""
df_01ORlog_svm = df_01ORlog.copy()
#df_01_filtered_combined = df_01_filtered_combined.stack(["Experiment", "Map"]).swaplevel(0,1, axis=0).dropna(axis=1)
index_ExpMap = df_01ORlog_svm.index.get_level_values("Map")+"_"+df_01ORlog_svm.index.get_level_values("Fraction")
index_ExpMap.name = "Map_Frac"
df_01ORlog_svm.set_index(index_ExpMap, append=True, inplace=True)
df_01ORlog_svm.index = df_01ORlog_svm.index.droplevel(["Map", "Fraction"])
df_01ORlog_svm = df_01ORlog_svm.unstack("Map_Frac")
#df_01ORlog_svm = df_01ORlog_svm.dropna(axis=0, subset=df_01ORlog_svm.loc[[], ["normalized profile"]].columns)
df_01ORlog_svm.columns = ["_".join(col) for col in df_01ORlog_svm.columns.values]
df_01ORlog_svm.rename(index={"undefined" : np.nan}, level="Compartment", inplace=True)
return df_01ORlog_svm
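# Hypothetical usage sketch (not part of the original module; `ds` is an assumed instance and
# the output file name is illustrative):
#     df_perseus = ds.reframe_df_01ORlog_for_Perseus(ds.df_01_stacked)
#     df_perseus.to_csv("for_perseus.txt", sep="\t")    # tab-separated export for import into Perseus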
class SpatialDataSetComparison:
analysed_datasets_dict = SpatialDataSet.analysed_datasets_dict
css_color = SpatialDataSet.css_color
cache_stored_SVM = True
def __init__(self, ref_exp="Exp2", **kwargs): #clusters_for_ranking=["Proteasome", "Lysosome"]
#self.clusters_for_ranking = clusters_for_ranking
self.ref_exp = ref_exp
self.json_dict = {}
#self.fractions, self.map_names = [], [] #self.df_01_stacked, self.df_log_stacked = pd.DataFrame(), pd.DataFrame()
#collapse_maps,collapse_cluster, cluster_of_interest_comparison, multi_choice, multi_choice_venn, x_PCA_comp, y_PCA_comp
#if "organism" not in kwargs.keys():
# self.markerproteins = self.markerproteins_set["Human - Swissprot"]
#else:
# assert kwargs["organism"] in self.markerproteins_set.keys()
# self.markerproteins = self.markerproteins_set[kwargs["organism"]]
# del kwargs["organism"]
#self.unique_proteins_total = unique_proteins_total
self.exp_names, self.exp_map_names = [], []
self.df_01_filtered_combined, self.df_distance_comp = | pd.DataFrame() | pandas.DataFrame |
from tests import input_process, output_process, visualize
# from unittest import mock
# import scripts.main.clean_KF as KF
from scripts.main import process_data_file # noqa: F401
from scripts.main import process_output
import pandas as pd
import numpy as np
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
'''
def test_process_data_file():
test_process = np.array([5])
file = pd.read_csv('test.csv')
assert type(process_data_file(file)) == type(test_process)
'''
def test_process_output():
'''
:param x: filtered/smoothed data
:param p: covariances_
:return: dataframe of processed results
'''
x = [[5,6]]
x = np.asarray(x)
p = np.array([5])
df = | pd.DataFrame() | pandas.DataFrame |