| prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
| --- | --- | --- |
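Each row below follows this three-column layout: a long code prompt, the short completion that fills the gap marked by the pipes on the prompt's final line, and the fully qualified API that the completion calls. As a minimal, hedged sketch of how such an export could be inspected with pandas (the Parquet file name is an assumption, not something given by this preview):

```python
import pandas as pd

# Hypothetical export of this preview; the file name is an assumption.
rows = pd.read_parquet("code_completions.parquet")

for _, row in rows.head(3).iterrows():
    # Each record pairs a (possibly very long) code prompt with the short
    # completion that fills its gap and the API that the completion calls.
    print(len(row["prompt"]), row["completion"], row["api"])
```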
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
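# For example, under the priority above, get_upcast_box(pd.Index, Series([1]))
# returns Series, get_upcast_box(DataFrame, pd.Index([1])) returns DataFrame,
# and get_upcast_box(pd.Index, np.array([1])) returns pd.Index.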
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = | tm.box_expected(td1, box_with_array, transpose=False) | pandas.util.testing.box_expected |
import os
import math
import torch
import torch.nn as nn
import traceback
import pandas as pd
import time
import numpy as np
import argparse
from utils.generic_utils import load_config, save_config_file
from utils.generic_utils import set_init_dict
from utils.generic_utils import NoamLR, binary_acc
from utils.generic_utils import save_best_checkpoint
from utils.tensorboard import TensorboardWriter
from utils.dataset import test_dataloader
from models.spiraconv import SpiraConvV1, SpiraConvV2
from utils.audio_processor import AudioProcessor
import random
# set random seed
random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
def test(criterion, ap, model, c, testloader, step, cuda, confusion_matrix=False):
padding_with_max_lenght = c.dataset['padding_with_max_lenght']
losses = []
accs = []
model.zero_grad()
model.eval()
loss = 0
acc = 0
preds = []
targets = []
with torch.no_grad():
for feature, target, slices, targets_org in testloader:
#try:
if cuda:
feature = feature.cuda()
target = target.cuda()
output = model(feature).float()
# output = torch.round(output * 10**4) / (10**4)
# Calculate loss
if not padding_with_max_lenght and not c.dataset['split_wav_using_overlapping']:
target = target[:, :output.shape[1],:target.shape[2]]
if c.dataset['split_wav_using_overlapping']:
# unpack the overlapping slices to compute loss and accuracy
if slices is not None and targets_org is not None:
idx = 0
new_output = []
new_target = []
for i in range(slices.size(0)):
num_samples = int(slices[i].cpu().numpy())
samples_output = output[idx:idx+num_samples]
output_mean = samples_output.mean()
samples_target = target[idx:idx+num_samples]
target_mean = samples_target.mean()
new_target.append(target_mean)
new_output.append(output_mean)
idx += num_samples
target = torch.stack(new_target, dim=0)
output = torch.stack(new_output, dim=0)
#print(target, targets_org)
if cuda:
output = output.cuda()
target = target.cuda()
targets_org = targets_org.cuda()
if not torch.equal(targets_org, target):
raise RuntimeError("Integrity problem during the unpack of the overlay for the calculation of accuracy and loss. Check the dataloader !!")
loss += criterion(output, target).item()
# calculate binary accuracy
y_pred_tag = torch.round(output)
acc += (y_pred_tag == target).float().sum().item()
preds += y_pred_tag.reshape(-1).int().cpu().numpy().tolist()
targets += target.reshape(-1).int().cpu().numpy().tolist()
if confusion_matrix:
print("======== Confusion Matrix ==========")
y_target = | pd.Series(targets, name='Target') | pandas.Series |
import os
import time
import logging
import datetime
import pandas as pd
import pydicom as dicom
from pathlib import Path
from collections import defaultdict
from dicomweb_client.api import DICOMwebClient
from ._utils import *
try:
import progressbar as pg
except ImportError:
pg = None
has_progressbar = bool(pg)
class KheopsClient:
"""
The following keys specify the information collected in the
summary .csv files.
"""
STUDY_KEYS = ["StudyInstanceUID", "PatientID",
"StudyDate", "ModalitiesInStudy"]
SERIES_KEYS = ["StudyInstanceUID", "SeriesInstanceUID",
"PatientID", "SeriesDate", "Modality", "RetrieveURL"]
INSTANCE_KEYS = ["StudyInstanceUID", "SeriesInstanceUID", "SOPInstanceUID",
"PatientID", "SeriesDate", "Modality"]
MAX_ROWS_PRINTED = 25
def __init__(self,
url,
access_token,
out_dir="downloads",
dry_run=False,
show_progress=True,
verbosity=0):
self._token = self._check_token(access_token)
self._default_out_dir = "downloads" if out_dir is None else out_dir
self._dry_run = dry_run
self._show_progress = show_progress
self._client = DICOMwebClient(
url=url,
headers={"Authorization": "Bearer {}".format(self._token)}
)
self._setup_logger(verbosity=verbosity)
self._print_status()
def _check_token(self, token):
if token is None:
token = os.getenv("ACCESS_TOKEN", None)
if not token:
msg = (
"ERROR: No access token was provided for the Kheops DICOM\n"
" repository. Use argument 'token' or the environment\n"
" variable ACCESS_TOKEN to set a token. About tokens:\n"
" https://docs.kheops.online/docs/tokens")
print(msg)
exit(1)
return token
def _setup_logger(self, verbosity):
level = logging.WARNING
level_ext = logging.ERROR
verbosity = 0 if verbosity is None else verbosity
if verbosity == 1:
level = logging.INFO
elif verbosity == 2:
level = logging.DEBUG
level_ext = logging.WARNING
elif verbosity>= 3:
level = logging.DEBUG
level_ext = logging.DEBUG
for name in ["dicomweb_client", "pydicom"]:
_logger = logging.getLogger(name)
_logger.setLevel(level_ext)
self._logger = logging.getLogger("client")
self._logger.setLevel(level)
def _get_progress(self, size=None,
label="Processing...",
threaded=False,
suppress_progress=False):
if not has_progressbar:
class DummyBar:
def __init__(*args, **kwargs):
pass
def start(self, *args, **kwargs):
return self
def update(self, *args, **kwargs):
return self
def finish(self, *args, **kwargs):
return self
return DummyBar()
else:
widgets = []
if label:
widgets.append(pg.FormatLabel("%-15s" % label))
widgets.append(" ")
if size is not None and size>0:
digits = len(str(size))
fmt_counter = f"%(value){digits}d/{size}"
widgets.append(pg.Bar())
widgets.append(" ")
widgets.append(pg.Counter(fmt_counter))
widgets.append(" (")
widgets.append(pg.Percentage())
widgets.append(")")
else:
widgets.append(pg.BouncingBar())
show_bar = (self._show_progress and not suppress_progress)
ProgressBarType = pg.ProgressBar if show_bar else pg.NullBar
if threaded and show_bar:
from threading import Timer
class RepeatTimer(Timer):
def run(self):
while not self.finished.wait(self.interval):
self.function(*self.args, **self.kwargs)
class ThreadedProgressBar(ProgressBarType):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timer = RepeatTimer(interval=0.05,
function=self.update)
self.timer.setDaemon(True)
def start(self, *args, **kwargs):
ret = super().start(*args, **kwargs)
self.timer.start()
return ret
def finish(self, *args, **kwargs):
self.timer.cancel()
return super().finish(*args, **kwargs)
ProgressBarType = ThreadedProgressBar
progress = ProgressBarType(max_value=size,
widgets=widgets,
poll_interval=0.02)
return progress
def _ensure_ouput_dir(self, out_dir, forced=True):
out_dir = out_dir if out_dir is not None else self._default_out_dir
out_dir = Path(out_dir)
if out_dir is None:
msg = "No output directory was specified."
raise ValueError(msg)
if not ensure_dir(path=out_dir, forced=forced):
msg = "Failed to create output directory."
raise RuntimeError(msg)
return out_dir
def _print_status(self):
self._logger.info("Client configuration:")
self._logger.info(" URL: %s", self._client.base_url)
self._logger.info(" Port: %s", self._client.port)
self._logger.info(" Token: %s", self._token)
self._logger.info(" Dryrun: %s", str(self._dry_run).lower())
self._logger.info("")
def _print_table_summary(self, df):
empty = | pd.Series(dtype=str) | pandas.Series |
import pandas as pd
import numpy as np
import metapy
from ast import literal_eval
class Searcher:
def __init__(self):
self.movies = None
self.filtered_movies = None
self.searched_movies = None
def read_file(self, file_name):
self.movies = | pd.read_csv(file_name, low_memory=False) | pandas.read_csv |
''' Evaluation of calibration metrics'''
import argparse
import os
import os.path
import ipdb
import random
import pickle
import csv
import numpy as np
import pandas as pd
import numpy.random as np_rand
import sklearn.calibration as skcal
import sklearn.metrics as skmetrics
import sklearn.linear_model as sklm
import sklearn.isotonic as skiso
import lightgbm
import scipy
import matplotlib as mpl
mpl.use("PDF")
import matplotlib.pyplot as plt
import circews.functions.util.io as mlhc_io
import circews.functions.util.array as mlhc_array
import circews.functions.features as bern_features
def rescaled_calibration_curve(y_true,y_prob, correct_factor=None, n_bins=20):
    ''' Prevalence-rescaled version of the calibration curve'''
bin_locs=np.arange(y_prob.min(),y_prob.max(),0.05)
act_risks=[]
act_locs=[]
for thr in range(1,len(bin_locs)):
risk_lab=y_true[(y_prob>=bin_locs[thr-1]) & (y_prob<bin_locs[thr])]
if risk_lab.size==0:
continue
tps=np.sum(risk_lab==1.0)
fps=correct_factor*np.sum(risk_lab==0.0)
act_risk=tps/(tps+fps)
act_risks.append(act_risk)
act_locs.append((bin_locs[thr-1]+bin_locs[thr])/2.)
return (np.array(act_risks),np.array(act_locs))
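# Illustrative sketch (not part of the original script): calling the helper above on
# synthetic scores. The correction factor of 2.0 is an assumed example value; as the
# function body shows, it simply scales the count of negatives in each bin.
def _example_rescaled_curve(seed=2019):
    rng = np_rand.RandomState(seed)
    y_prob = rng.uniform(0.0, 1.0, size=5000)
    y_true = (rng.uniform(0.0, 1.0, size=5000) < y_prob).astype(float)
    act_risks, act_locs = rescaled_calibration_curve(y_true, y_prob,
                                                     correct_factor=2.0, n_bins=20)
    return act_risks, act_locs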
def alarm_folder(model_desc,mimic_split_key):
if "top500" in model_desc:
return "shap_top500_features"
elif "MIMIC_BERN" in model_desc:
return "shap_top20_variables_MIMIC_BERN"
elif "MIMIConly" in model_desc:
return "shap_top20_variables_MIMIConly"
else:
return "shap_top20_variables_MIMIC"+"_no_subsample_val_on_{}".format(mimic_split_key)
def calibration_metrics(configs):
static_cols_without_encode=["Age","Height","Emergency"]
static_cols_one_hot_encode=["Surgical","APACHEPatGroup"]
static_cols_one_hot_encode_str=["Sex"]
str_to_int_dict={"M": 0, "F": 1, "U": 2}
random.seed(configs["random_state"])
np_rand.seed(configs["random_state"])
held_out=configs["val_type"]
cal_set=configs["calibration_set"]
dim_reduced_str=configs["data_mode"]
task_key=configs["task_key"]
left_hours=configs["lhours"]
right_hours=configs["rhours"]
assert(dim_reduced_str in ["reduced","non_reduced"])
threshold_dict={}
with open(configs["threshold_map"],'r') as fp:
csv_fp=csv.reader(fp)
next(csv_fp)
for _,split_key,recall,threshold,model in csv_fp:
if not float(recall)==configs["desired_recall"]:
continue
threshold_dict[("shap_"+model,split_key)]=float(threshold)
feat_order=None
if dim_reduced_str=="reduced":
dim_reduced_data=True
else:
dim_reduced_data=False
bern_batch_map=mlhc_io.load_pickle(configs["bern_pid_map_path"])["pid_to_chunk"]
mimic_batch_map=mlhc_io.load_pickle(configs["mimic_pid_map_path"])["pid_to_chunk"]
n_skipped_patients=0
scores_dict={}
labels_dict={}
cal_scores_dict={}
cal_labels_dict={}
all_labels=[("lightgbm", "shap_top20_variables_MIMIC_BERN","held_out",None),("lightgbm", "shap_top20_variables_MIMIC_BERN","temporal_1",None),
("lightgbm", "shap_top20_variables_MIMIC_BERN","temporal_2",None),("lightgbm", "shap_top20_variables_MIMIC_BERN","temporal_3",None),
("lightgbm", "shap_top20_variables_MIMIC_BERN","temporal_4",None),("lightgbm", "shap_top20_variables_MIMIC_BERN","temporal_5",None),
("lightgbm","shap_top500_features","held_out",None),("lightgbm","shap_top500_features","temporal_1",None),
("lightgbm","shap_top500_features","temporal_2",None),("lightgbm","shap_top500_features","temporal_3",None),
("lightgbm","shap_top500_features","temporal_4",None),("lightgbm","shap_top500_features","temporal_5",None)]
for ml_model, col_desc,split_key,mimic_split_key in all_labels:
if configs["only_alarm_points"]:
if "MIMIConly" in col_desc:
critical_score=threshold_dict[(col_desc.strip(), mimic_split_key)]
else:
critical_score=threshold_dict[(col_desc.strip(), split_key)]
configs["split_key"]=split_key
print("Analyzing model ({},{},{})".format(ml_model,col_desc, split_key))
cum_pred_scores=[]
cum_labels=[]
data_split=mlhc_io.load_pickle(configs["temporal_split_path"])[split_key]
if "MIMIC" in col_desc and "BERN" not in col_desc:
mimic_data_split=mlhc_io.load_pickle(configs["mimic_split_path"])[mimic_split_key]
if "MIMIC" in col_desc and "BERN" not in col_desc:
pred_pids=mimic_data_split[held_out]
else:
pred_pids=data_split[held_out]
if not configs["cohort_category"]=="global":
pid_maps={}
all_mapped_pids=[]
with open(configs["pid_category_map"],'r') as fp:
csv_fp=csv.reader(fp,delimiter='\t')
header_map={}
header_line=next(csv_fp)
for idx,field in enumerate(header_line):
if field.strip() not in ["Sex","AgeGroup_range","Emergency", "ApacheGroupName","Surgical", "ApacheScoreGroup_range"]:
continue
pid_maps[field.strip()]={}
header_map[field.strip()]=idx
for line in csv_fp:
pid=int(line[0])
all_mapped_pids.append(pid)
for k in header_map.keys():
pid_maps[k][pid]=line[header_map[k]].strip()
pred_pids=list(filter(lambda pid: pid in all_mapped_pids and pid_maps[configs["cohort_category"].strip()][pid]==configs["cohort_value"].strip(), pred_pids))
if not configs["cohort_category_fine"]=="global":
pred_pids=list(filter(lambda pid: pid in all_mapped_pids and pid_maps[configs["cohort_category_fine"].strip()][pid]==configs["cohort_value_fine"].strip(), pred_pids))
num_filtered_pids=len(pred_pids)
print("Number of filtered test PIDs: {}".format(num_filtered_pids))
if "MIMIConly" in col_desc:
calibration_pids=mimic_data_split[cal_set]
else:
calibration_pids=data_split[cal_set]
print("Number of test PIDs: {}".format(len(pred_pids)))
print("Number of calibration PIDs: {}".format(len(calibration_pids)))
output_dir=os.path.join(configs["predictions_dir"],"reduced",split_key,"{}_{}_{}_{}_{}".format(task_key, left_hours, right_hours, col_desc, ml_model))
if "MIMIConly" in col_desc:
feat_dir=os.path.join(configs["mimic_ml_input_dir"],"reduced",split_key,"AllLabels_0.0_8.0","X")
labels_dir=os.path.join(configs["mimic_ml_input_dir"],"reduced",split_key,"AllLabels_0.0_8.0","y")
impute_dir=os.path.join(configs["mimic_imputed_dir"], "reduced",split_key)
full_static_df=pd.read_hdf(os.path.join(impute_dir,"static.h5"),mode='r')
else:
feat_dir=os.path.join(configs["bern_ml_input_dir"],"reduced",split_key,"AllLabels_0.0_8.0","X")
labels_dir=os.path.join(configs["bern_ml_input_dir"],"reduced",split_key,"AllLabels_0.0_8.0","y")
impute_dir=os.path.join(configs["bern_imputed_dir"], "reduced",split_key)
full_static_df=pd.read_hdf(os.path.join(impute_dir,"static.h5"),mode='r')
output_dir=output_dir+"_full"
with open(os.path.join(output_dir,"best_model.pickle"),'rb') as fp:
orig_model=pickle.load(fp)
feat_order=list(filter(lambda col: "static_" not in col, orig_model._Booster.feature_name()))
for pidx,pid in enumerate(pred_pids):
if (pidx+1)%500==0 and configs["verbose"]:
print("{}/{}".format(pidx+1,len(pred_pids)))
if pidx>=100 and configs["debug_mode"]:
break
if "MIMIC" in col_desc and "BERN" not in col_desc:
batch_pat=mimic_batch_map[pid]
else:
batch_pat=bern_batch_map[pid]
try:
df_pred=pd.read_hdf(os.path.join(output_dir,"batch_{}.h5".format(batch_pat)), "/p{}".format(pid), mode='r')
df_pred=df_pred[pd.notna(df_pred["TrueLabel"]) & pd.notna(df_pred["PredScore"])]
except KeyError:
n_skipped_patients+=1
continue
if configs["only_alarm_points"]:
try:
alarm_path=alarm_folder(col_desc,mimic_split_key)
split_path=mimic_split_key if "MIMIConly" in col_desc else split_key
df_alarms=pd.read_hdf(os.path.join(configs["alarms_path"],alarm_path,split_path,"batch_{}.h5".format(batch_pat)),"/p{}".format(pid),mode='r')
except KeyError:
continue
df_im=pd.merge(df_pred,df_alarms,on=["RelDatetime"])
df_pred=df_im[df_im["Status"]=="Alarm"]
df_pred.rename(columns={"PredScore_x": "PredScore"},inplace=True)
pred_scores=np.array(df_pred["PredScore"])
true_labels=np.array(df_pred["TrueLabel"])
cum_pred_scores.append(pred_scores)
cum_labels.append(true_labels)
scores_dict[(ml_model,col_desc,split_key,mimic_split_key)]=np.concatenate(cum_pred_scores)
labels_dict[(ml_model,col_desc,split_key,mimic_split_key)]=np.concatenate(cum_labels)
cum_cal_scores=[]
cum_cal_labels=[]
if "MIMIConly" in col_desc:
df_shapelet_path=os.path.join(configs["shapelets_path"],"Shapelet_features_{}_MIMIC.h5".format(split_key))
else:
df_shapelet_path=os.path.join(configs["shapelets_path"],"Shapelet_features_{}.h5".format(split_key))
n_valid_count=0
for pidx,pid in enumerate(calibration_pids):
if (pidx+1)%500==0 and configs["verbose"]:
print("{}/{}".format(pidx+1,len(pred_pids)))
if pidx>=100 and configs["debug_mode"]:
break
if "MIMIConly" in col_desc:
batch_pat=mimic_batch_map[pid]
else:
batch_pat=bern_batch_map[pid]
try:
pat_df=pd.read_hdf(os.path.join(feat_dir,"batch_{}.h5".format(batch_pat)), "/{}".format(pid), mode='r')
pat_label_df=pd.read_hdf(os.path.join(labels_dir,"batch_{}.h5".format(batch_pat)), "/{}".format(pid),mode='r')
assert(pat_df.shape[0]==pat_label_df.shape[0])
df_feat_valid=pat_df[pat_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]
df_label_valid=pat_label_df[pat_label_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]
assert(df_feat_valid.shape[0]==df_label_valid.shape[0])
except KeyError:
continue
if df_feat_valid.shape[0]==0:
continue
shapelet_df=pd.read_hdf(df_shapelet_path, '/{}'.format(pid), mode='r')
shapelet_df["AbsDatetime"]=pd.to_datetime(shapelet_df["AbsDatetime"])
special_cols=["AbsDatetime","PatientID"]
shapelet_cols=list(filter(lambda col: "_dist-set" in col, sorted(shapelet_df.columns.values.tolist())))
shapelet_df=shapelet_df[special_cols+shapelet_cols]
if shapelet_df.shape[0]==0:
continue
df_merged= | pd.merge(df_feat_valid,shapelet_df,on=["AbsDatetime","PatientID"]) | pandas.merge |
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
import pypipegraph as ppg
from pathlib import Path
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Annotator
def DummyAnnotatable(name):
return DelayedDataFrame(
name,
lambda: pd.DataFrame(
{
"a": ["a", "b", "c", "d"],
"b": [1, 2, 3, 4],
"c": [200.1, 100.2, 400.3, 300.4],
}
),
)
def force_load(ddf):
ppg.JobGeneratingJob("shu", lambda: 55).depends_on(ddf.annotate())
class SequenceAnnotator(Annotator):
columns = ["sequence"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: range(0, len(df))})
class SequenceAnnotatorDuo(Annotator):
columns = ["sequenceDuo", "rev_sequenceDuo"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: range(0, len(df)), self.columns[1]: range(len(df), 0, -1)}
)
class SequenceAnnotatorDuoCollision(Annotator):
columns = ["shu", "rev_sequenceDuo"]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: range(0, len(df)), self.columns[1]: range(len(df), 0, -1)}
)
class FixedAnnotator(Annotator):
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def deps(self, ddf):
return ppg.ParameterInvariant(
ddf.name + "_" + self.columns[0], str(self.values)
)
def calc(self, df):
op = open("dummy.txt", "ab")
op.write(b"A")
op.close()
return pd.DataFrame({self.columns[0]: self.values[: len(df)]})
class FixedAnnotator2(Annotator): # used for conflict of annotator class tests
def __init__(self, column_name, values):
self.columns = [column_name]
self.values = values
def deps(self, ddf):
return ppg.ParameterInvariant(
ddf.name + "_" + self.column_name, str(self.values)
)
def annotate(self, annotat):
op = open("dummy.txt", "ab")
op.write(b"A")
op.close()
return pd.DataFrame({self.columns[0]: self.values[: len(annotat)]})
class BrokenAnnoDoesntCallAnnotatorInit(Annotator):
columns = ["shu"]
def calc(self, df):
return pd.DataFrame({self.column_name: range(0, len(df))})
class FakeAnnotator(object):
columns = ["shu"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: range(0, len(df))})
@pytest.mark.usefixtures("new_pipegraph")
class Test_FromOldGenomics:
def test_add_annotator_takes_only_annotators(self):
a = DummyAnnotatable("A")
with pytest.raises(TypeError):
a += 123
def test_non_anno_raises(self):
a = DummyAnnotatable("A")
with pytest.raises(TypeError):
a += FakeAnnotator()
def test_one_column_annotator(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_two_column_annotator(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotatorDuo()
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert (a.df["sequenceDuo"] == [0, 1, 2, 3]).all()
assert (a.df["rev_sequenceDuo"] == [4, 3, 2, 1]).all()
def test_two_differenct_annotators_with_identical_column_names_raise_on_adding(
self
):
a = DummyAnnotatable("A")
anno = SequenceAnnotatorDuo()
a.add_annotator(anno)
anno2 = SequenceAnnotatorDuoCollision()
a.add_annotator(anno2)
force_load(a)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
def test_annotator_copying_on_filter(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
def test_annotator_copying_on_filter_two_deep(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
second = even.filter("event2", lambda df: df["b"] == 4)
a.add_annotator(anno)
force_load(second)
ppg.run_pipegraph()
assert (second.df["b"] == [4]).all()
assert (second.df["sequence"] == [3]).all()
def test_annotator_copying_on_filter_with_anno(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["sequence"] % 2 == 0, annotators=[anno])
force_load(even)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [1, 3]).all()
assert (even.df["sequence"] == [0, 2]).all()
def test_no_anno_data_copying_if_no_annotate_dependency(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
def write():
op = open("dummy.txt", "wb")
op.write(b"SHU")
op.close()
ppg.FileGeneratingJob("dummy.txt", write).depends_on(even.load())
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert "sequence" not in even.df.columns
def test_anno_data_copying_if_add_annotator_dependency(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
def wf():
op = open("dummy.txt", "wb")
op.write(b"SHU")
op.close()
fg = ppg.FileGeneratingJob("dummy.txt", wf)
even.add_annotator(anno)
fg.depends_on(even.add_annotator(anno))
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
def test_annotator_copying_on_sort_and_top(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter(
"event", lambda df: df.sort_values("b", ascending=False)[:2].index
)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [4, 3]).all()
assert (even.df["sequence"] == [3, 2]).all()
def test_annotator_just_added_to_child(self):
a = DummyAnnotatable("A")
even = a.filter("event", lambda df: df["b"] % 2 == 0)
anno = SequenceAnnotator()
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
# after all, we add it anew.
assert (even.df["sequence"] == [0, 1]).all()
assert "sequence" not in a.df.columns
def test_annotator_first_added_to_parent_then_to_child(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_annotator_first_added_to_parent_then_to_second_child(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0).filter(
"shu", lambda df: df["b"] == 2
)
even.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2]).all()
assert (even.df["sequence"] == [1]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_annotator_first_added_to_child_then_to_parent(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
even.add_annotator(anno)
force_load(even)
a.add_annotator(anno)
force_load(a)
ppg.run_pipegraph()
assert "sequence" in even.df
assert "sequence" in a.df
def test_annotator_added_after_filtering(self):
a = DummyAnnotatable("A")
anno = SequenceAnnotator()
even = a.filter("event", lambda df: df["b"] % 2 == 0)
a.add_annotator(anno)
force_load(even)
ppg.run_pipegraph()
assert (even.df["b"] == [2, 4]).all()
assert (even.df["sequence"] == [1, 3]).all()
assert (a.df["sequence"] == [0, 1, 2, 3]).all()
def test_non_hashable_init__args(self):
a = FixedAnnotator("shu", ["h", "i", "j", "k"])
b = FixedAnnotator("shu", ["h", "i", "j", "k"])
assert a is b
def test_annotator_copying_parent_changed(self, new_pipegraph):
# first run
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "k"]).all()
assert Path("dummy.txt").read_text() == "A" # so it ran once...
new_pipegraph.new_pipegraph()
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "k"]).all()
assert Path("dummy.txt").read_text() == "A" # so it was not rerun
new_pipegraph.new_pipegraph()
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
force_load(even)
ppg.run_pipegraph()
assert (even.df["shu"] == ["i", "z"]).all()
assert Path("dummy.txt").read_text() == "AA" # so it was rerun
def test_filter_annotator_copy_nested(self):
# first run
a = DummyAnnotatable("A")
a.write()
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
anno2 = FixedAnnotator("shaw", ("a1", "b2", "c3", "d4"))
a.add_annotator(anno)
first = a.filter("first", lambda df: (df["a"] == "b") | (df["a"] == "d"))
second = first.filter("second", lambda df: ([True, True]))
third = second.filter("third", lambda df: (df["shu"] == "i"), annotators=[anno])
fourth = first.filter("fourth", lambda df: ([False, True]))
second.write()
fn_4 = fourth.write()[1]
a.add_annotator(anno2)
fourth.add_annotator(anno2)
force_load(first)
force_load(second)
force_load(third)
force_load(fourth)
ppg.run_pipegraph()
assert (first.df["shu"] == ["i", "k"]).all()
assert (first.df["parent_row"] == [1, 3]).all()
assert (first.df["shaw"] == ["b2", "d4"]).all()
assert (second.df["shu"] == ["i", "k"]).all()
assert (second.df["parent_row"] == [1, 3]).all()
assert (second.df["shaw"] == ["b2", "d4"]).all()
assert (third.df["shu"] == ["i"]).all()
assert (third.df["shaw"] == ["b2"]).all()
assert (third.df["parent_row"] == [1]).all()
assert (fourth.df["shu"] == ["k"]).all()
assert (fourth.df["parent_row"] == [3]).all()
assert (fourth.df["shaw"] == ["d4"]).all()
df = pd.read_csv(fn_4, sep="\t")
print(df)
assert (df["shaw"] == ["d4"]).all()
assert_frame_equal(df, fourth.df.reset_index(drop=True), check_less_precise=2)
def test_changing_anno_that_filtering_doesnt_care_about_does_not_retrigger_child_rebuild(
self, new_pipegraph
):
def count():
op = open("dummyZZ.txt", "ab")
op.write(b"A")
op.close()
fg = ppg.FileGeneratingJob("dummyZZ.txt", count)
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
fg.depends_on(even.load())
ppg.run_pipegraph()
Path("dummyZZ.txt").read_text() == "A" # so it ran once...
new_pipegraph.new_pipegraph()
fg = ppg.FileGeneratingJob("dummyZZ.txt", count)
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
even = a.filter("event", lambda df: df["b"] % 2 == 0)
fg.depends_on(even.load())
ppg.run_pipegraph()
Path("dummyZZ.txt").read_text() == "A" # so it was not rerun!
pass
def test_same_annotor_call_returns_same_object(self):
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
anno2 = FixedAnnotator("shu", ("h", "i", "j", "k"))
assert anno is anno2
def test_new_pipeline_invalidates_annotor_cache(self, new_pipegraph):
anno = FixedAnnotator("shu", ("h", "i", "j", "k"))
new_pipegraph.new_pipegraph()
anno2 = FixedAnnotator("shu", ("h", "i", "j", "k"))
assert anno is not anno2
def test_raises_on_same_column_name_differing_parameters(self):
a = DummyAnnotatable("A")
a += FixedAnnotator("shu", ("h", "i", "j", "k"))
with pytest.raises(ValueError):
a += FixedAnnotator("shu", ("h", "i", "j", "h"))
def test_raises_on_same_column_name_different_annotators(self):
a = DummyAnnotatable("A")
a += FixedAnnotator("shu", ("h", "i", "j", "k"))
with pytest.raises(ValueError):
a += FixedAnnotator2("shu", ("h", "i", "j", "k"))
def test_write(self):
a = DummyAnnotatable("A")
anno = FixedAnnotator("shu", ("h", "i", "j", "z"))
a.add_annotator(anno)
a.write(Path("shu.xls").absolute())
ppg.run_pipegraph()
df = pd.read_excel("shu.xls")
| assert_frame_equal(df, a.df, check_less_precise=2, check_dtype=False) | pandas.testing.assert_frame_equal |
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) data stored in XML files following the HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containing the aECG XML file.
If '', then xml file is not stored in a zip file.
aecg_doc (etree._ElementTree): XML document of the aECG XML file.
aecgannset (AecgAnnotationSet): Annotation set to which append found
annotations.
path_prefix (str): Prefix of xml path from which start searching for
annotations.
annsset_xmlnode_path (str): Path to xml node of the annotation set
containing the annotations.
valgroup (str, optional): Indicates whether to search annotations in
rhythm or derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
f'annotations groups found')
anngrpid_from_beats = anngrpid
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
for subsubannsnode in tmpnodes:
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
f' without an associated beat found')
return aecgannset, valpd
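# Illustrative sketch (not part of the original module): how parse_annotations is
# typically invoked for the rhythm waveform of an already parsed aECG XML document.
# The xpath prefix below is a placeholder assumption; `annset` is an existing
# AecgAnnotationSet created by the caller.
def _example_parse_rhythm_annotations(xml_filename: str,
                                      aecg_doc: etree._ElementTree,
                                      annset: AecgAnnotationSet):
    prefix = "./component/series/subjectOf/annotationSet"
    return parse_annotations(xml_filename, "", aecg_doc, annset,
                             path_prefix=prefix,
                             annsset_xmlnode_path=prefix,
                             valgroup="RHYTHM",
                             log_validation=True)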
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
This function parses the `aecg_doc` xml document searching for general
    information to include in the returned `Aecg`: unique identifier (UUID),
ECG date and time of collection (EGDTC), and device information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
This function parses the `aecg_doc` xml document searching for subject
    information to include in the returned `Aecg`: subject unique identifier
(USUBJID), gender, birthtime, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
else:
if n == "root":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} not found')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE found: {valrow["VALUE"]}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.RACE not found')
aecg.RACE = valrow["VALUE"]
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
This function parses the `aecg_doc` xml document searching for treatment
information that includes in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts study information
This function parses the `aecg_doc` xml document searching for study
    information to include in the returned `Aecg`: study unique identifier
(STUDYID), and study title.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"STUDYID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} found: {valrow["VALUE"]}')
aecg.STUDYID[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/title",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "STUDYINFO",
"STUDYTITLE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE found: {tmp}')
aecg.STUDYTITLE = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
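# Illustrative sketch (not part of the original module): the parse_* helpers above are
# designed to be chained on a single Aecg object; `aecg_doc` and `aecg` are assumed to
# be provided by the caller (e.g. by this package's top-level read function).
def _example_parse_header(aecg_doc: etree._ElementTree, aecg: Aecg,
                          log_validation: bool = False) -> Aecg:
    aecg = parse_generalinfo(aecg_doc, aecg, log_validation)
    aecg = parse_subjectinfo(aecg_doc, aecg, log_validation)
    aecg = parse_trtainfo(aecg_doc, aecg, log_validation)
    aecg = parse_studyinfo(aecg_doc, aecg, log_validation)
    return aecg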
def parse_timepoints(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts timepoints information
This function parses the `aecg_doc` xml document searching for timepoints
    information to include in the returned `Aecg`: absolute timepoint or
study event information (TPT), relative timepoint or study event relative
to a reference event (RTPT), and protocol timepoint information (PTPT).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# TPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/reasonCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TPT_reasonCode"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode found: {valrow["VALUE"]}')
aecg.TPT["reasonCode"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/"
"effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
# =======================================
# RTPT
# =======================================
valpd = | pd.DataFrame() | pandas.DataFrame |
import tkinter as tk
import sys
from tkinter import filedialog
import random
import numpy as np
import pandas as pd
import math
import seaborn as sns
sys.path.append('Portplanering')
sys.path.append('Bilbokning/src')
from bilbokning import calculate_carriages
HEURISTICS = ['local_search',
'simulated_annealing',
'variable_neighbourhood_search',
'tabu_search']
NEIGHBOURHOODS = ['swap_port',
'swap_time',
'move_port',
'move_time']
zone_dict = {
0: 'TÄLT ',
1: 'FRIST ',
2: 'MPFTG\t',
3: 'MPBVV\t',
4: 'MPJÄR\t',
5: 'HPALL\t',
6: 'ADR ',
7: 'ENTEB\t',
8: 'ENTST\t'
}
# Function to change orderfile- Not to be used during testing
def browse_files():
filename = filedialog.askopenfilename(title = "Select a File",
filetypes = (("Text files",
"*.csv*"),
("all files",
"*.*")))
w.configure(text="File Opened: "+filename)
#----------------------------------FUNCTIONS-----------------------------------
# Global variables to be used for prints etc.
global running
global best_cost
global best_solution
global COSTS1
global COSTS2
running = False
# Function when portplanering() is running
def portplanering():
global running
global best_cost
global best_solution
global COSTS1
global COSTS2
COSTS1 = []
COSTS2 = []
from heuristic import run_heuristics
from generate_solution import generate_random_solution
from transportproblem_pulp import transportproblem
# Generate a feasible solution
y = generate_random_solution()
# Calculate the current cost
cost = transportproblem(y)[0]
best_cost = cost
best_solution = np.copy(y)
# Initial constans for SA and Tabu search
temp = 1000
tabu_list_max_len = 10
# Initial Tabu list for tabusearch
tabu_list = []
# Insert an initial word into the text
T.insert(tk.END, 10)
# Set neighbour to the chosen one through gui.
neighbour = chosen_neighbour.get()
local_opt = False
# running == True whenever the search for a heuristic is on
ctr = 0
while running:
ctr += 1
# Start a heuristic iteration
cost, y, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len, neighbour = \
run_heuristics(y, cost, chosen_heuristic.get(), neighbour, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len)
# Remove the previous output and insert the current cost
T.delete("1.0", "end")
T.insert(tk.END, cost)
# Generate a new random neighbourhood is condition is fulfilled.
if local_opt:
neighbour = NEIGHBOURHOODS[random.randrange(2)]
local_opt = False
m.update()
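        # Hard cap of 200 iterations so the search loop always terminates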
if ctr == 200:
            running = False
break
def save_pic(cos, colname, filename):
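    # Plot the recorded cost trace (first 199 points) and save it as PDF and PNG.
    # cos is a list of objective values, colname a single-element list used as
    # the column label, filename the output path without extension.
    # Illustrative (hypothetical) call: save_pic(COSTS1, ['cost'], 'ls_run')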
df = pd.DataFrame([cos])
df = df.T
df.columns = colname
a = sns.lineplot(data=df[0:199])
figure = a.get_figure()
figure.savefig(filename+'.pdf')
figure.savefig(filename+'.png')
# Function that destroys the main window
def destroy_window():
m.destroy()
# If both Bilbokning and Portplanering are marked, Bilbokning runs first
# and Portplanering runs afterwards.
def run_program():
# If Bilbokning is checked, it starts bilbokning
if bilv.get() == 1:
date=T.get("1.0", 'end-1c')
calculate_carriages(slid.get(), date)
d.configure(text="Date: "+date)
# If Portplanering is checked, it starts portplanering
if portv.get() == 1:
global running
        # Set the global variable to True, meaning the heuristic is running.
running = True
portplanering()
# The Stop button does not stop Bilbokning, only the heuristic search.
def stop_program():
from transportproblem_pulp import transportproblem
global running
global best_solution
if portv.get() == 1:
running = False
T.delete("1.0", "end")
# Calculate the cost of the best_solution found so far.
cost, x = transportproblem(best_solution)
# Print it in window and run solution_to_txt
T.insert(tk.END, 'Best solution found: ' + str(cost))
solution_to_txt(cost, x)
#------------------------------------------------------------------------------
# -------------------------------Window----------------------------------------
# Creates a window showing every order's assigned ports
def view_solution():
L = pd.read_csv('Portplanering/Lj.csv')
number_of_orders = len(L)
J = range(number_of_orders)
import csv
def showSol():
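        # Read the selected order's solution CSV and render it as a grid of labels in a new window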
top2 = tk.Toplevel()
with open('solution/'+str(chosen_order_list.get())+'.csv', newline='') as file:
reader = csv.reader(file)
r = 0
for col in reader:
c = 0
for row in col:
label = tk.Label(top2,
width = 10,
height = 2,
text = row,
relief = tk.RIDGE)
label.grid(row = r, column = c)
c += 1
r += 1
# Define buttons
top = tk.Toplevel()
top.title('Solution window')
chosen_order_list = tk.StringVar(top)
chosen_order_list.set(J[0])
op_menu_order = tk.OptionMenu(top, chosen_order_list, *J)
op_menu_order.pack()
button_open_solution = tk.Button(top,
text='Show solution',
command = showSol)
button_open_solution.pack()
# Function that writes the current solution to a text file for inspection
def solution_to_txt(cost, x):
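    # Load the model input tables (Lj, Sj, dij, mj, aip) needed to format the solution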
L = pd.read_csv('Portplanering/Lj.csv')
S = pd.read_csv('Portplanering/Sj.csv')
dij = pd.read_csv('Portplanering/dij.csv')
mj = pd.read_csv('Portplanering/mj.csv')
a = | pd.read_csv('Portplanering/aip.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 10:31:21 2020
Script to discover whether the given DataFrame contains features that can
predict the stiffness (or any other target)
@author: nikorose
"""
import pandas as pd
import numpy as np
import csv
from tpot import TPOTRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA, KernelPCA
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import seaborn as sns
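# Flags toggling which stages of the analysis below are executed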
build_df = True
make_pred_TPOT = True
make_RF = False
reducing = False
writing = False
# =============================================================================
# Loading the data
# =============================================================================
input_horst = pd.read_csv('Horst/Horst_marker_distances_anthropometry.csv',index_col=0)
output_horst = pd.read_csv('Horst/Horst_reg_lines_raw.csv', index_col=[0,1])
idx = pd.IndexSlice
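# Split the regression-line targets by the phase label in the second index level (ERP, LRP, DP)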
ERP = output_horst.loc[idx[:,'ERP'],:]
LRP = output_horst.loc[idx[:,'LRP'],:]
DP = output_horst.loc[idx[:,'DP'],:]
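# Placeholder frame with the anthropometry columns aligned to the output index (contents uninitialised here)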
input_horst_amp = pd.DataFrame(np.empty((output_horst.shape[0],
input_horst.shape[1])), index=output_horst.index,
columns = input_horst.columns)
# =============================================================================
# functions
# =============================================================================
def RandomForest_pred(X_train, X_test, y_train, y_test, writing=False, n_iter=20):
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 50, stop = 2000, num = 60)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt', None]
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 200, num = 30)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 3, 4, 5, 10, 20]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4, 8]
# Method of selecting samples for training each tree
bootstrap = [True, False]
    # n_components for the Kernel PCA reducer
n_comp = range(2,35)
gamma = range(1,25)
kernel = ['rbf', 'poly', 'sigmoid']
# Creating the random grid
random_grid = {'regressor__n_estimators': n_estimators,
'regressor__max_features': max_features,
'regressor__max_depth': max_depth,
'regressor__min_samples_split': min_samples_split,
'regressor__min_samples_leaf': min_samples_leaf,
'regressor__bootstrap': bootstrap,
'reducer__n_components': n_comp,
'reducer__gamma': gamma,
'reducer__kernel': kernel}
# Use the random grid to search for best hyperparameters
# First create the base model to tune
pipeline = Pipeline([('scaler', StandardScaler()),
('reducer', KernelPCA()),
('regressor', RandomForestRegressor(random_state = 42))])
    # Random search of parameters, using 3-fold cross validation,
    # searching across n_iter random combinations and using all available cores
rf_random = RandomizedSearchCV(estimator=pipeline, param_distributions=random_grid,
n_iter = n_iter, scoring='neg_mean_squared_error',
cv = 3, verbose=1, random_state=42, n_jobs=-1,
return_train_score=True)
rf_random.fit(X_train, y_train)
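    # Predict on the held-out set, keeping the original index so the comparison table aligns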
predicted_regression = pd.Series(rf_random.predict(X_test), name='Predicted', \
index=y_test.index)
    # Compute the hold-out error and put the predictions next to the true values
rmse= mean_squared_error(y_test, predicted_regression)
comp_table = | pd.concat([y_test, predicted_regression], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
import numpy as np
from pandas import Series, DataFrame, Index, Float64Index
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
class TestFloatIndexers(tm.TestCase):
def check(self, result, original, indexer, getitem):
"""
        comparator for results; we need to take care whether we are
        indexing on a Series or a DataFrame
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
        # but is specifically testing for the error
# message
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex, tm.makeIntIndex,
tm.makeRangeIndex]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
def f():
s.iloc[3.0]
self.assertRaisesRegexp(TypeError,
'cannot do positional indexing',
f)
def f():
s.iloc[3.0] = 0
self.assertRaises(TypeError, f)
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
i = index(5)
for s in [Series(
np.arange(len(i)), index=i), DataFrame(
np.random.randn(
len(i), len(i)), index=i, columns=i)]:
# getting
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.iloc, False),
(lambda x: x, True)]:
def f():
idxr(s)[3.0]
                    # getitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
else:
error = TypeError
self.assertRaises(error, f)
# label based can be a TypeError or KeyError
def f():
s.loc[3.0]
if s.index.inferred_type in ['string', 'unicode', 'mixed']:
error = KeyError
else:
error = TypeError
self.assertRaises(error, f)
# contains
self.assertFalse(3.0 in s)
# setting with a float fails with iloc
def f():
s.iloc[3.0] = 0
self.assertRaises(TypeError, f)
# setting with an indexer
if s.index.inferred_type in ['categorical']:
# Value or Type Error
pass
elif s.index.inferred_type in ['datetime64', 'timedelta64',
'period']:
                    # these should probably work
                    # and are inconsistent between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
# def f():
# idxr(s2)[3.0] = 0
# self.assertRaises(TypeError, f)
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
self.assertTrue(s2.index.is_object())
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
idxr(s2)[3.0] = 0
self.assertTrue(s2.index.is_object())
            # falls back to positional selection, Series only
s = Series(np.arange(len(i)), index=i)
s[3]
self.assertRaises(TypeError, lambda: s[3.0])
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
s3 = Series([1, 2, 3], index=['a', 'b', 1.5])
# lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x.ix,
lambda x: x,
lambda x: x.iloc]:
def f():
idxr(s2)[1.0]
self.assertRaises(TypeError, f)
self.assertRaises(KeyError, lambda: s2.loc[1.0])
result = s2.loc['b']
expected = 2
self.assertEqual(result, expected)
# mixed index so we have label
# indexing
for idxr in [lambda x: x.ix,
lambda x: x]:
def f():
idxr(s3)[1.0]
self.assertRaises(TypeError, f)
result = idxr(s3)[1]
expected = 2
self.assertEqual(result, expected)
self.assertRaises(TypeError, lambda: s3.iloc[1.0])
self.assertRaises(KeyError, lambda: s3.loc[1.0])
result = s3.loc[1.5]
expected = 3
self.assertEqual(result, expected)
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
i = index(5)
for s in [Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)),
index=i, columns=i)]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
if isinstance(s, Series):
compare = self.assertEqual
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100,
index=range(len(s)), name=3)
else:
expected = Series(100.,
index=range(len(s)), name=3)
s2 = s.copy()
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
self.assertTrue(3.0 in s)
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.))
for s in [Series(np.arange(len(index)), index=index),
DataFrame(np.random.randn(len(index), len(index)),
index=index, columns=index)]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
# getting
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
def f():
idxr(s2)[indexer] = expected
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
self.assertRaises(KeyError, lambda: idxr(s)[3.5])
# contains
self.assertTrue(3.0 in s)
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
self.assertRaises(TypeError, lambda: s.iloc[3.0])
def g():
s2.iloc[3.0] = 0
self.assertRaises(TypeError, g)
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
index = index(5)
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l]
self.assertRaises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
idxr(s)[l]
self.assertRaises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l] = 0
self.assertRaises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
idxr(s)[l] = 0
self.assertRaises(TypeError, f)
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
        # oob indicates whether we are out of bounds
        # of positional indexing
for index, oob in [(tm.makeIntIndex(5), False),
(tm.makeRangeIndex(5), False),
(tm.makeIntIndex(5) + 10, True)]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
self.assertRaises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-6, 6),
slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[slice(-6.0, 6.0)]
self.assertRaises(TypeError, f)
# getitem odd floats
for l, res1 in [(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4))]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
self.assertRaises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
self.assertTrue((result == 0).all())
# positional indexing
def f():
s[l] = 0
self.assertRaises(TypeError, f)
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x,
lambda x: x.iloc]:
for l in [slice(2, 4.0),
slice(2.0, 4),
slice(2.0, 4.0)]:
def f():
idxr(s)[l]
self.assertRaises(TypeError, f)
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
index = index(5)
s = DataFrame(np.random.randn(5, 2), index=index)
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
# getitem
for l in [slice(0.0, 1),
slice(0, 1.0),
slice(0.0, 1.0)]:
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
self.assertRaises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-10, 10),
slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
def f():
s[slice(-10.0, 10.0)]
self.assertRaises(TypeError, f)
# getitem odd floats
for l, res in [(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2))]:
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
self.assertRaises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
self.assertTrue((result == 0).all())
# positional indexing
def f():
s[l] = 0
self.assertRaises(TypeError, f)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.)) + 0.1
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
# getitem
result = idxr(s)[l]
self.assertTrue(result.equals(expected))
# setitem
s2 = s.copy()
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
self.assertTrue((result == 0).all())
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
self.assertEqual(s[3], 2)
self.assertEqual(s.ix[3], 2)
self.assertEqual(s.loc[3], 2)
self.assertEqual(s.iloc[3], 3)
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.ix[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.ix[5.0]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
result1 = s[5]
result2 = s.loc[5]
result3 = s.ix[5]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
self.assertEqual(s[5.0], s[5])
        # value not found (and no fallback at all)
# scalar integers
self.assertRaises(KeyError, lambda: s.loc[4])
self.assertRaises(KeyError, lambda: s.ix[4])
self.assertRaises(KeyError, lambda: s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.ix[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.ix[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.ix[2:5]
result2 = s.ix[2.0:5.0]
result3 = s.ix[2.0:5]
result4 = s.ix[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.ix[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.ix[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s[[1.6, 5, 10]]
result2 = s.loc[[1.6, 5, 10]]
result3 = s.ix[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
result1 = s[[0, 1, 2]]
result2 = s.ix[[0, 1, 2]]
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
| assert_series_equal(result1, result3) | pandas.util.testing.assert_series_equal |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
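        # For pandas 1.1 <= version < 1.2.2 compare inexactly and just check that
        # the extension dtypes are preserved; otherwise compare exactly.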
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
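        """Shared dropna checks (run with axis=0 and axis=1): compares pandas-on-Spark
        against pandas for how/subset/thresh/inplace and MultiIndex subset labels."""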
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert unsupported axis value yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
# Seems like pandas has a bug when passing `np.array` as parameter
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
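            # Row order of the pandas-on-Spark result is not deterministic, so sort the
            # rows and reset the index on both sides before comparing.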
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
multi_index_pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]])
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
        pdf = pd.DataFrame({"a": [2], "b": [1]})
'''
Utility functions for running DeepSurv experiments
'''
import h5py
import scipy.stats as st
from collections import defaultdict
import numpy as np
import pandas as pd
import copy
import lasagne
def load_datasets(dataset_file):
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
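# Illustrative usage (the file name and group/column names below are assumptions,
# following the usual DeepSurv HDF5 layout where each group holds 'x', 't' and 'e'):
#   datasets = load_datasets('whas_train_test.h5')
#   train_df = format_dataset_to_df(datasets['train'], duration_col='T', event_col='E')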
def format_dataset_to_df(dataset, duration_col, event_col, trt_idx=None):
xdf = pd.DataFrame(dataset['x'])
if trt_idx is not None:
xdf = xdf.rename(columns={trt_idx: 'treat'})
dt = pd.DataFrame(dataset['t'], columns=[duration_col])
censor = pd.DataFrame(dataset['e'], columns=[event_col])
    cdf = pd.concat([xdf, dt, censor], axis=1)
    return cdf
"""
Plotting for the Huys task. The functions have been written
for the object in resourceAllocator.py (gradient-free
optimization - either CMAES or Bayesian Optimization for
the equal precision model).
Also plots comparison of gradient-based with gradient-free
after the relevant simulation results have been produced.
"""
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib, matplotlib.pyplot as plt
sns.set()
def plot_table(obj, tableColour='mean'):
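    """Assemble the per-memory table ((s,a), (r,s'), q-values and mean resource
    allocations per search budget) that backs the table plot; obj is the
    gradient-free optimizer object described in the module docstring."""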
# Setting up memory columns other than resourceAllocVector
a = np.array([' left','right'])
s = np.arange(6) + 1
r = obj.env.rewards[:12]
s1 = ((obj.env.transitions[:12]+1)[:,1] - 6)
sa = [sa for sa in itertools.product(s,a)] * (obj.depth-1)
rs = list(zip(r,s1)) * (obj.depth-1)
q = obj.q[:-12]
d = {'(s,a)': sa, '(r,s\')': rs, 'q_mu(s,a)':q}
# Setting up resource allocation vectors for table of memories
nMems = obj.results.shape[-1]
means = np.transpose( np.mean(obj.results, axis=1) )
stds = np.transpose( np.std(obj.results, axis=1) )
cols = np.around( (np.array(obj.searchBudget) / \
(obj.depth * 2**obj.depth) * 100), decimals=1)
# Table of optimal resource allocation vectors for different search budgets
df = pd.DataFrame(d)
if len(means)==1: # if equal precision => broadcast
means = np.array(list(means)*len(df))
nMems = len(df)
dm = pd.DataFrame(means.astype(int), columns=cols)
    df = pd.merge(df, dm, left_index=True, right_index=True)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 23:40:50 2018
@author: austin
20181213 add .drop_duplicates() for DataFrame
20181215 add combine csv in function
20181220 skip index column when import
20181230 add GetDetail() function for deals closed within the last 30 days
"""
import requests
import re
from bs4 import BeautifulSoup,SoupStrainer
#import matplotlib.pyplot as plt
from fake_useragent import UserAgent
import time,random,sys
import pandas  # pandas is great
#ua=UserAgent()  # use a random User-Agent header to mimic a human visitor
#headers1={'User-Agent': 'ua.random'}  # use a random User-Agent header to mimic a human visitor
TotalPrice=[] #Total price
InitialPrice=[]
UnitPrice=[] #price per meter
HouseArea=[]
HouseHeight=[]
HouseConfig=[]
HouseCommunit=[]
HouseLocMajor=[]
HouseLocMinor=[]
HouseBuildYear=[]
HouseDealDate=[]
HouseDealCycle=[]
LinkUrl=[]
StrainerPriceInfo = SoupStrainer('a',attrs={'class':'nostyle'})
StrainerChengJiaoList = SoupStrainer('ul',attrs={'class':'listContent'})
StrainerTotalPage = SoupStrainer('div',attrs={'class':'page-box house-lst-page-box'}) # get the current maximum page number
PianQuList= ['北蔡', '碧云', '曹路', '川沙', '大团镇', '合庆', '高行', '高东', '花木', '航头', '惠南', '金桥', '金杨', '康桥', '陆家嘴', '老港镇', '临港新城', '联洋', '泥城镇', '南码头', '三林', '世博', '书院镇', '塘桥', '唐镇', '外高桥', '万祥镇', '潍坊', '宣桥', '新场', '御桥', '杨东', '源深', '洋泾', '张江', '祝桥', '周浦']
PianQuLink= ['/chengjiao/beicai/', '/chengjiao/biyun/', '/chengjiao/caolu/', '/chengjiao/chuansha/', '/chengjiao/datuanzhen/', '/chengjiao/geqing/', '/chengjiao/gaohang/', '/chengjiao/gaodong/', '/chengjiao/huamu/', '/chengjiao/hangtou/', '/chengjiao/huinan/', '/chengjiao/jinqiao/', '/chengjiao/jinyang/', '/chengjiao/kangqiao/', '/chengjiao/lujiazui/', '/chengjiao/laogangzhen/', '/chengjiao/lingangxincheng/', '/chengjiao/lianyang/', '/chengjiao/nichengzhen/', '/chengjiao/nanmatou/', '/chengjiao/sanlin/', '/chengjiao/shibo/', '/chengjiao/shuyuanzhen/', '/chengjiao/tangqiao/', '/chengjiao/tangzhen/', '/chengjiao/waigaoqiao/', '/chengjiao/wanxiangzhen/', '/chengjiao/weifang/', '/chengjiao/xuanqiao/', '/chengjiao/xinchang/', '/chengjiao/yuqiao1/', '/chengjiao/yangdong/', '/chengjiao/yuanshen/', '/chengjiao/yangjing/', '/chengjiao/zhangjiang/', '/chengjiao/zhuqiao/', '/chengjiao/zhoupu/']
#PianQuList=[]
#PianQuList.index('唐镇') #24
#PianQuLink[PianQuList.index('唐镇')] #'/chengjiao/tangzhen/'
MaxGetPage=1
PreFile='LianJia.csv'
TotalPage=MaxGetPage
HouseLocMajorString='浦东'
def SaveList():
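    """Write the scraped deal records to a timestamped CSV and, when PreFile is set,
    merge in the rows read from the previously saved CSV."""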
df=pandas.DataFrame({'总价':TotalPrice,'单价':UnitPrice,'房型':HouseConfig,'成交日期':HouseDealDate,
'成交周期':HouseDealCycle,'面积':HouseArea,'小区':HouseCommunit,'楼层':HouseHeight,
'区':HouseLocMajor,'板块':HouseLocMinor,'初始报价':InitialPrice,'楼龄':HouseBuildYear,
'网址':LinkUrl})
datetimestr=time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
    print('Latest: '+datetimestr+' got '+ str(len(df))+' closed deals')
df.to_csv(datetimestr+'-'+HouseLocMajorString+'-LianJia.csv')
try:
if (len(PreFile) != 0):
            # read_csv engine parameter: the default C engine can fail when reading
            # Chinese column headers (always reproducible here). The workaround is to
            # use engine='python' (the docs say the C engine is faster, but the Python
            # engine is more feature-complete).
            #df2=pandas.read_csv(PreFile,encoding='ANSI',index_col=0,engine='python')
            # skip the index column
df2=pandas.read_csv(PreFile,encoding='ANSI',index_col=False,engine='python',usecols=range(1,14))
            print('Previous file '+ PreFile +' got '+ str(len(df2))+' closed deals')
            df = pandas.concat([df, df2], ignore_index=True)
# LIBRARIES
import os
import pandas as pd
from siuba import group_by, ungroup, arrange, summarize, _
import numpy as np
import geopandas as gpd
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import contextily as ctx
from shapely.geometry import box, Polygon, MultiPolygon, GeometryCollection, shape
from shapely import wkt
from shapely.ops import transform
from babelgrid import Babel
from multiprocessing.pool import Pool
from functools import partial
from loguru import logger
## Coarse grid
def _get_lines(update_data = False):
"""
Get data frame of lines with count of jams per line and split number
"""
logger.info('Lines')
if update_data:
# Download data from Athena
logger.debug("Downloading lines")
conn = utils.connect_athena(path='configs/athena.yaml')
qry = f"""
select line_wkt, count(line_wkt) as count_lines
from spd_sdv_waze_corona.raw_sample_jams
group by line_wkt"""
df_lines = pd.read_sql_query(qry, conn)
df_lines.to_csv('/home/soniame/shared/spd-sdv-omitnik-waze/corona/geo_partition/lines/line_wkt_count_202010712.csv', index=False)
else:
# Read current table
logger.debug("Reading lines")
path_vs = '/home/soniame/shared/spd-sdv-omitnik-waze/corona/geo_partition/lines/line_wkt_count_202010712.csv'
logger.debug(f"From {path_vs}")
df_lines = pd.read_csv(path_vs)
logger.debug(f"L: {len(df_lines)}")
return(df_lines)
def _split_groups(df_lines, ng = 6):
"""
    Split lines into ng equally sized groups (consecutive blocks with the same number of rows)
"""
size = len(df_lines)/ng
index_split = list()
for n in range(ng):
new_list = [n+1]*int(size)
index_split.extend(new_list)
len(index_split)
df_lines['split'] = index_split
return(df_lines)
def _line_to_coarse(line, tiles):
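    """Map one WKT line to the first tile in `tiles` whose geometry intersects it;
    returns the line together with that tile's WKT ("" when nothing intersects)."""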
# list with logical value of grid tiles intersection per line
# Total length is the number of tiles
inter_list = list()
for tile in tiles:
# tile geometry
geom = tile.geometry.shapely
# intersection of tile geometry with line
inter_list.append(geom.intersection(wkt.loads(line)).is_empty == False)
# wkt assigned to each line
if sum(inter_list) == 0:
# In case there's no intersection
pos = None
t_wkt = ""
else:
pos = np.where(inter_list)[0].tolist()[0]
t_wkt = tiles[pos].geometry.wkt
result = {'line': line, 'coarse_wkt': t_wkt}
return(result)
def _create_coarse_grid(df_lines, tiles, split):
"""
    Create the intersection between the H3 grid tiles and the lines from the 50 sample dates.
    The work is split for parallelization purposes; each split is processed separately.
"""
logger.info('Create coarse grid')
if False:
# Lines done previously
prev = pd.read_csv("/home/soniame/private/projects/corona_geo_id/coarse_grid/coarse_id.csv"). \
rename(columns = {'line':'line_wkt'})
logger.debug(f'PL: {len(prev)}') # preview lines
# Elimination of lines already done
df_merge = df_lines.merge(prev, how='left')
df_merge = df_merge[df_merge.coarse_wkt.isnull() == True]
logger.debug(f'Lines done: {len(df_lines) - len(df_merge)}') # new lines
# Final lines
lines = df_lines.line_wkt
logger.debug(f'NL: {len(lines)}') # new lines
# Tiles H3
#tiles = Babel('h3').polyfill(geometry, resolution = 1)
logger.debug(f"Tiles: {len(tiles)}")
# Matching lines per tile
with Pool(10) as p:
r = p.map(partial(_line_to_coarse, tiles = tiles), lines)
df_coarse = pd.DataFrame(r)
logger.debug(f"UL: {df_coarse.shape[0]}") # update lines
# Locallty saved - Join is made at
# Notebook: notebooks/katana_bounds.ipynb#Split-lines-into-grid
path_vs = f"/home/soniame/private/projects/corona_geo_id/coarse_grid/coarse_id_new_{split}.csv"
logger.debug(f"To {path_vs}")
df_coarse.to_csv(path_vs, index = False)
return None
def create_coarse_grid(config, h3_resolution=2):
# Reading coarse grid
df_coarse = _get_coarse_grid(). \
rename(columns = {'line':'line_wkt'})
# Reading distribution
    tab = pd.read_csv('/home/soniame/shared/spd-sdv-omitnik-waze/corona/geo_partition/figures/coarse_grid_distribution.csv')
#!/usr/bin/env python3
import sys
import struct
import pandas as pd
import matplotlib
# Must be before importing matplotlib.pyplot or pylab!
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
###############################################
dsize = 16
###############################################
def getFrame(data, iter = None):
if iter is None:
return data
else:
return data[data.iter==iter]
def dirtyCls(data, iter = None):
df = getFrame(data, iter)
return sum(df.bits.apply(lambda x: sum(x)))
def dirtyPages(data, iter = None):
df = getFrame(data, iter)
return len(df.page)
def dirtyClsB(data, iter = None):
return dirtyCls(data, iter) * 64
def dirtyPagesB(data, iter = None):
return dirtyPages(data, iter) * 4096
def avgDirtyCls(data):
numIter = len(data.iter.unique())
return dirtyCls(data) / float(numIter)
def avgDirtyPages(data):
numIter = len(data.iter.unique())
return dirtyPages(data) / float(numIter)
def avgDirtyClsPerPage(data, iter = None):
df = getFrame(data, iter)
numPages = dirtyPages(df)
return dirtyCls(df) / float(numPages)
def getDirtyCLsPerPage(fileContent, meta, iterFirst = None, iterLast = None):
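    """Histogram, summed over the selected iterations, of pages that have exactly k dirty
    cache lines (k = 1..64); iteration 0 is skipped because all cache lines start dirty."""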
if iterFirst is None:
### skip iteration 0 because we set all cache lines to dirty in that iteration
iterFirst = meta.iter.iloc[1]
if iterLast is None:
iterLast = len(meta.iter)
dfF = pd.DataFrame({'cnt':[0]*64}, index=range(1,65))
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
        df = pd.DataFrame({'cnt': [sum(data.bits.apply(lambda x: sum(x)) == XX) for XX in range(1, 65)]}, index=range(1, 65))
dfF = dfF+df
return dfF
def getDiffPagesClsB(fileContent, meta, iterFirst = None, iterLast = None):
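    """Per-iteration dirty bytes at cache-line vs. page granularity, with the resulting
    write amplification (page bytes / cache-line bytes) and dirty percentage."""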
if iterFirst is None:
iterFirst = meta.iter.iloc[0]
if iterLast is None:
iterLast = len(meta.iter)
df = pd.DataFrame()
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
dcl = dirtyClsB(data)
dp = dirtyPagesB(data)
df1 = pd.DataFrame({'iter':[i], 'dirtyCl':[dcl], 'dirtyP':[dp], 'amplif':[dp*1.0/dcl], 'pcnt':[dcl*100.0/dp]})
df = df.append(df1)
return df
def readBinFile(filename):
with open(filename, mode='rb') as file:
fileContent = file.read()
return fileContent
def getMetadata(fileContent):
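    """Index the (iteration, count) block headers in the raw byte stream.

    Each block starts with two uint64 values (iteration id, record count); the next
    header follows after `count` 16-byte records. Returns a frame with columns
    iter, count and pos (byte offset of the header).
    """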
first = 0
totalSize = len(fileContent)
meta=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
df1 = pd.DataFrame({'iter':[iter], 'count':[count], 'pos':[first]})
meta = meta.append(df1)
first = count * dsize + (first + dsize)
return meta
def getDataframeWBitlist(fileContent):
first = 0
totalSize = len(fileContent)
    data = pd.DataFrame()
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
from Swing.util.mplstyle import style1
import seaborn as sns
from palettable.colorbrewer.qualitative import Set1_3
def get_df(df, fp, min_lag, max_lag, td_window, inftype = "RandomForest"):
new_df = df[(df['file_path'] == fp) & (df['min_lag'] == min_lag) & (df['max_lag'] == max_lag) & (df['td_window'] == td_window) & (df['InfType'] == inftype)]
return(new_df)
def load_data():
input_folder_list = ["/projects/p20519/roller_output/gnw/RandomForest/"]
agg_df_RF = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_RF['InfType'] = 'RandomForest'
input_folder_list = ["/projects/p20519/roller_output/gnw/Dionesus/"]
agg_df_P = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_P['InfType'] = 'PLSR'
input_folder_list = ["/projects/p20519/roller_output/gnw/Lasso/"]
agg_df_L = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_L['InfType'] = 'Lasso'
all_dfs = [agg_df_RF, agg_df_P, agg_df_L]
merged_df = pd.concat(all_dfs)
return(merged_df)
def read_tdr_results(folder_list, folder_str):
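    """Concatenate tab-separated result files whose names contain folder_str from every
    folder in folder_list, realigning rows whose columns are shifted by one."""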
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
# check if the columns are misaligned.
if type(df['permutation_n'].iloc[0]) is str:
new_col = df.columns.tolist()
new_col.pop(0)
new_df = df.iloc[:,0:len(df.iloc[0])-1]
new_df.columns = new_col
df=new_df
agg_df = agg_df.append(df)
return(agg_df)
def get_inf_df(network_1, inf_type):
RFnet1 = network_1[network_1['InfType'] == inf_type]
RFn1 = RFnet1.groupby('td_window').mean()
return(RFn1)
def get_comparisons(merged_df, inftypes, window_sizes, network_list):
overall_df = pd.DataFrame()
    network_1_df = pd.DataFrame()
import os
import pandas as pd
import numpy as np
import PIL
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot so it takes effect
import matplotlib.pyplot as plt
import json
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch import optim
import torch.nn.functional as F  # needed for F.log_softmax below
from hzhu_gen import *
from hzhu_data import *
from hzhu_metrics_class import *
class NetLearn:
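    """Training/evaluation harness for a network that jointly predicts a class label and
    a gaze-saliency map from a chest X-ray: wires up the optimizer, ReduceLROnPlateau
    scheduling, loss/metric bookkeeping and checkpoint paths."""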
def __init__(
self,
net,
dataAll,
criterion,
optimizer_dict,
lr,
lr_min,
lr_factor,
epoch_max,
duration_max,
patience_reduce_lr,
patience_early_stop,
device,
metrics,
name,
path):
self.quickTimer = QuickTimer()
self.net = net
self.dataAll = dataAll
self.optimizer_dict = optimizer_dict
self.lr = lr
self.lr_min = lr_min
self.lr_factor = lr_factor
self.duration_max = duration_max
self.epoch_max = epoch_max
self.criterion = criterion
self.device = device
self.patience_reduce_lr = patience_reduce_lr
self.patience_early_stop = patience_early_stop
self.train_loss_list = []
self.valid_loss_list = []
self.test_loss_list = []
self.metrics_list = []
self.lr_list = []
self.name = name
self.path = path
self.ID = self.name+'_'+random_str()
self.epoch = 0
self.metrics = metrics
self.set_optimizer()
self.set_scheduler()
self.model_name = 'NET.pt'
self.optim_name = 'OPT.pt'
self.sched_name = 'SCH.pt'
self.create_save_path()
print('ID:', self.ID)
def set_optimizer(self):
self.optimizer = self.optimizer_dict['optimizer'](self.net.parameters(), lr=self.lr, **self.optimizer_dict['param'])
def set_scheduler(self):
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer,
mode='min',
factor=self.lr_factor,
patience=self.patience_reduce_lr,
eps=0,
verbose=False)
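# Assumed usage sketch (the epoch loop itself is not shown in this excerpt):
# ReduceLROnPlateau expects the monitored quantity to be passed to step() once per
# epoch, e.g.
#   self.scheduler.step(mean_valid_loss)
# which multiplies the learning rate by lr_factor after patience_reduce_lr epochs
# without improvement.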
def train_iterate(self, dataLoader):
self.epoch += 1
self.net.train()
loss_list = []
for data in dataLoader:
X = data['cxr'].to(self.device).unsqueeze(1)
Y_class = data['Y'].to(self.device).long()
Y_saliency = data['gaze'].to(self.device).unsqueeze(1)
Y_saliency = Y_saliency/Y_saliency.sum(dim=(-2,-1), keepdim=True)
self.optimizer.zero_grad()
Y_class_pred, Y_saliency_pred = self.net(X)
Y_saliency_pred_shape = Y_saliency_pred.shape
Y_saliency_pred = F.log_softmax(Y_saliency_pred.flatten(start_dim=-2, end_dim=-1), dim=-1).reshape(Y_saliency_pred_shape)
net_list = self.net.compute_loss(
y_class_pred=Y_class_pred,
y_image_pred=Y_saliency_pred,
y_class_true=Y_class,
y_image_true=Y_saliency,
loss_class=self.criterion['class'],
loss_image_list=[self.criterion['saliency'],])
loss = net_list['loss_sum']
loss.backward()
self.optimizer.step()
loss_list.append(loss.detach().clone().cpu())
del data, X, Y_class, Y_saliency, Y_class_pred, Y_saliency_pred, net_list, loss
return loss_list
def eval_iterate(self, dataLoader):
self.net.eval()
loss_list = []
metrics_class = self.metrics['class']()
metrics_saliency = self.metrics['saliency']()
with torch.no_grad():
for data in dataLoader:
X = data['cxr'].to(self.device).unsqueeze(1)
Y_class = data['Y'].to(self.device).long()
Y_saliency = data['gaze'].to(self.device).unsqueeze(1)
Y_saliency = Y_saliency/Y_saliency.sum(dim=(-2,-1), keepdim=True)
Y_class_pred, Y_saliency_pred = self.net(X)
Y_saliency_pred_shape = Y_saliency_pred.shape
Y_saliency_pred = F.log_softmax(Y_saliency_pred.flatten(start_dim=-2, end_dim=-1), dim=-1).reshape(Y_saliency_pred_shape)
net_list = self.net.compute_loss(
y_class_pred=Y_class_pred,
y_image_pred=Y_saliency_pred,
y_class_true=Y_class,
y_image_true=Y_saliency,
loss_class=self.criterion['class'],
loss_image_list=[self.criterion['saliency'],])
metrics_class.add_data(Y_class, Y_class_pred)
metrics_saliency.add_data(Y=Y_saliency, Y_pred=Y_saliency_pred)
for item in net_list:
tmp = {}
tmp[item] = net_list[item].detach().clone().cpu()
loss_list.append(tmp)
del data, X, Y_class, Y_saliency, Y_class_pred, Y_saliency_pred, net_list
return {'metrics_class':metrics_class, 'metrics_saliency':metrics_saliency, 'loss': | pd.DataFrame(loss_list) | pandas.DataFrame |
from numpy import save
import pandas as pd
import os
from glob import glob
import numpy as np
from shutil import copy
import argparse
def seg_id_extract_pr1954(p):
return "PAIRED_" + "_".join(p.split("_")[:-1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seg_file", type=str, required=True)
args = parser.parse_args()
save_dir = "./easyocr_data"
os.makedirs(save_dir)
train_dir = os.path.join(save_dir, "pr_train")
val_dir = os.path.join(save_dir, "pr_val")
os.makedirs(train_dir)
os.makedirs(val_dir)
root_dir = "/srv/ocr/Japan/personnelrecords/deep-learning-pipeline/segment-annotations/v2-mapped/corrected_crops"
chars_dir = os.path.join(root_dir, "chars")
seg_dir = os.path.join(root_dir, "seg")
seg_paths = glob(os.path.join(seg_dir, "*.png"))
seg_basenames = [os.path.basename(x) for x in seg_paths]
seg_ids = [seg_id_extract_pr1954(p) for p in seg_basenames]
uni_seg_ids = sorted(list(set(seg_ids)))
"""
np.random.seed(99)
np.random.shuffle(uni_seg_ids)
EVAL_PCT = 0.3
val_seg_ids = uni_seg_ids[:int(EVAL_PCT*len(uni_seg_ids))]
train_seg_ids = uni_seg_ids[int(EVAL_PCT*len(uni_seg_ids)):]
"""
with open(args.seg_file) as f:
val_seg_ids = f.read().split()
train_seg_ids = [x for x in uni_seg_ids if not x in val_seg_ids]
train_seg_basenames = [x for x in seg_basenames if seg_id_extract_pr1954(x) in train_seg_ids]
val_seg_basenames = [x for x in seg_basenames if seg_id_extract_pr1954(x) in val_seg_ids]
print(f"Len val segs {len(val_seg_basenames)}; len train segs {len(train_seg_basenames)}")
train_labels = []
for sbname in train_seg_basenames:
seq_str = os.path.splitext(sbname)[0].split("_")[-1]
train_labels.append((sbname, seq_str))
copy(os.path.join(seg_dir, sbname), train_dir)
train_labels_df = pd.DataFrame(train_labels, columns=["filename", "words"])
train_labels_df.to_csv(os.path.join(train_dir, "labels.csv"))
val_labels = []
for sbname in val_seg_basenames:
seq_str = os.path.splitext(sbname)[0].split("_")[-1]
val_labels.append((sbname, seq_str))
copy(os.path.join(seg_dir, sbname), val_dir)
val_labels_df = | pd.DataFrame(val_labels, columns=["filename", "words"]) | pandas.DataFrame |
from transformers import AutoTokenizer, AutoModelForMaskedLM, \
GPT2LMHeadModel, GPT2Tokenizer, \
RobertaForMaskedLM, RobertaTokenizer, BertTokenizer, BertForMaskedLM, \
BartForConditionalGeneration, BartTokenizer, XLNetTokenizer, T5Tokenizer
import torch
import json
from utils.constant import CUDA_DEVICE, RELATION_FILES
import numpy as np
import matplotlib.pyplot as plt
import random
from prettytable import PrettyTable
import seaborn as sns
import os
import pandas as pd
from matplotlib import rcParams
import heapq
# Storage path for each model
MODEL_PATH = {
# 'bert-base-uncased': '/share/model/bert/uncased_L-12_H-768_A-12',
# 'bert-base-cased': '/home/caoboxi/saved_models/bert-base-cased',
# 'bert-large-uncased': '/share/model/bert/uncased_L-24_H-1024_A-16',
# 'bert-large-cased': '/home/caoboxi/saved_models/bert-large-cased',
# 'bert-large-cased-wwm': '/home/caoboxi/saved_models/bert-large-cased-whole-word-masking',
# 'gpt2': '/home/caoboxi/saved_models/gpt2',
# "gpt2-medium": "/shared_home/caoboxi/models/saved_models/gpt2-medium",
# 'gpt2-large': '/home/caoboxi/saved_models/gpt2-large',
# 'gpt2-xl': '/home/caoboxi/saved_models/gpt2-xl',
# 'roberta-base': '/home/caoboxi/saved_models/roberta-base',
# 'roberta-large': '/home/caoboxi/saved_models/roberta-large',
'bart-large': 'facebook/bart-large',
# 'bart-base': '/home/caoboxi/saved_models/bart-base',
'bart-base': 'facebook/bart-base',
}
def build_model(model_name):
if model_name in MODEL_PATH:
model_path = MODEL_PATH[model_name]
else:
raise RuntimeError('model does not exist')
if model_prefix(model_name) == "bart":
tokenizer = BartTokenizer.from_pretrained(model_path)
model = BartForConditionalGeneration.from_pretrained(model_path, force_bos_token_to_be_generated=True)
elif model_prefix(model_name) == "gpt2":
tokenizer, model = build_gpt_model(model_name)
elif model_prefix(model_name) == "roberta":
tokenizer = RobertaTokenizer.from_pretrained(model_path)
model = RobertaForMaskedLM.from_pretrained(model_path)
elif model_prefix(model_name) == "bert":
tokenizer = BertTokenizer.from_pretrained(model_path, do_lower_case=False)
model = BertForMaskedLM.from_pretrained(model_path)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, do_lower_case=False)
model = AutoModelForMaskedLM.from_pretrained(model_path)
return tokenizer, model
def build_gpt_model(model_name):
if model_name in MODEL_PATH:
model_type = MODEL_PATH[model_name]
else:
raise RuntimeError('model does not exist')
tokenizer = GPT2Tokenizer.from_pretrained(model_type)
model = GPT2LMHeadModel.from_pretrained(model_type, return_dict=True)
tokenizer.pad_token = tokenizer.eos_token
return tokenizer, model
def batch_text(input_texts, batch_size=32, obj_tokens=None):
if obj_tokens is None:
batch_input_text = []
single_batch = []
for text in input_texts:
single_batch.append(text)
if len(single_batch) == batch_size:
batch_input_text.append(single_batch)
single_batch = []
if len(single_batch) > 0:
batch_input_text.append(single_batch)
return batch_input_text
else:
assert len(input_texts) == len(obj_tokens)
batch_input_text = []
batch_obj_tokens = []
single_batch = []
single_obj_batch = []
for text, obj in zip(input_texts, obj_tokens):
single_batch.append(text)
single_obj_batch.append(obj)
if len(single_batch) == batch_size:
batch_input_text.append(single_batch)
batch_obj_tokens.append(single_obj_batch)
single_batch = []
single_obj_batch = []
if len(single_batch) > 0:
batch_input_text.append(single_batch)
batch_obj_tokens.append(single_obj_batch)
return batch_input_text, batch_obj_tokens
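# Minimal usage sketch (hypothetical data): with batch_size=2 the helper simply chunks
# the list, keeping texts and their object tokens aligned per batch.
#   texts = ["t1", "t2", "t3"]; objs = ["o1", "o2", "o3"]
#   batches, obj_batches = batch_text(texts, batch_size=2, obj_tokens=objs)
#   # batches     == [["t1", "t2"], ["t3"]]
#   # obj_batches == [["o1", "o2"], ["o3"]]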
def load_file(filename):
data = []
with open(filename, "r") as f:
for line in f:
data.append(json.loads(line))
f.close()
return data
def store_file(filename, data):
with open(filename, "w") as f:
n = len(data)
for i in range(n):
line = data[i]
if i != n-1:
f.write(json.dumps(line) + "\n")
else:
f.write(json.dumps(line))
f.close()
def load_json_dic(filename):
with open(filename, 'r') as f:
return json.load(f)
def store_json_dic(filename, dic):
with open(filename, 'w') as f:
json.dump(dic, f)
def load_roberta_vocab():
return load_json_dic("data/roberta_data/roberta_vocab.json")
def model_prefix(model_name):
return model_name.split("-")[0]
def filter_samples_by_vocab(samples, vocab):
filter_samples = []
for sample in samples:
sub, obj = get_pair(sample)
if obj in vocab:
filter_samples.append(sample)
return filter_samples, len(samples), len(filter_samples)
def get_relations(file_path='data/relations_with_trigger.jsonl'):
original_relations = load_file(file_path)
return original_relations
def score_to_result(score, topk, tokenizer, obj_token=None, rank_k=10000, roberta_vocab2idx=None):
score = torch.softmax(score, dim=-1)
predicted_prob, predicted_index = torch.topk(score, topk)
predicted_prob = predicted_prob.detach().cpu().numpy()
predicted_index = predicted_index.cpu().numpy().tolist()
if "roberta" in tokenizer.name_or_path:
predicted_tokens = []
for index in predicted_index:
predicted_tokens.append(tokenizer.decode(index).strip())
elif 'bert' in tokenizer.name_or_path:
predicted_tokens = tokenizer.convert_ids_to_tokens(predicted_index)
elif 'gpt' in tokenizer.name_or_path:
predicted_tokens = []
for index in predicted_index:
predicted_tokens.append(tokenizer.decode(index))
else:
raise RuntimeError('model not defined')
if obj_token is None:
return predicted_tokens, predicted_prob
else:
if "roberta" in tokenizer.name_or_path:
if roberta_vocab2idx is None:
raise RuntimeError("need to be fix")
obj_index = roberta_vocab2idx[obj_token]
obj_prob = score[obj_index].item()
else:
obj_index = tokenizer.convert_tokens_to_ids(obj_token)
obj_prob = score[obj_index].item()
rank_prob, rank_index = torch.topk(score, rank_k)
rank_index = rank_index.cpu().numpy().tolist()
if obj_index not in rank_index:
obj_rank = rank_k
mrr = 0
else:
obj_rank = rank_index.index(obj_index) + 1
mrr = 1 / obj_rank
return predicted_tokens, predicted_prob, obj_prob, obj_rank, mrr
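# Interpretation sketch (not original code): when obj_token is given, obj_rank is the
# 1-based rank of the gold token among the top rank_k scores and mrr is its reciprocal,
# e.g. a gold token ranked 4th gives obj_rank == 4 and mrr == 0.25; tokens outside the
# top rank_k fall back to obj_rank == rank_k and mrr == 0.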
def get_pair(sample, return_id=False):
while "sub_label" not in sample:
try:
sample = sample['sample']
except:
print(sample)
exit(0)
sub = sample['sub_label']
obj = sample['obj_label']
sub_id = sample["sub_uri"]
if return_id:
return sub, obj, sub_id
else:
return sub, obj
def mean_round(num, num_len, r=2):
return round(num * 100 / num_len, r)
def divide_samples_by_ans(samples):
true_samples = []
false_samples = []
for sample in samples:
if sample['predict_ans'] is True:
true_samples.append(sample)
else:
false_samples.append(sample)
return true_samples, false_samples
def box_plot(ax, data, labels=None):
ax.boxplot(data, labels=labels)
plt.show()
def get_relation_args(args):
infos = RELATION_FILES[args.relation_type]
args.relation_file = infos['relation_file']
args.sample_dir = infos['sample_dir']
args.sample_file_type = infos["sample_file_type"]
return args
def get_bert_vocab(model_name):
tokenizer = BertTokenizer.from_pretrained(model_name)
vocab = tokenizer.get_vocab()
return vocab
def set_seed(seed_num=1023):
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
def get_table_stat(table: PrettyTable, return_cols=False, return_mean=False):
rows = table._rows
mean_row = []
median_row = []
up_quantile_row = []
down_quantile_row = []
std_row = []
cols = []
if len(rows) == 0:
return table
for j in range(len(rows[0])):
cols.append([row[j] for row in rows])
for col in cols:
if type(col[0]) == str:
mean_row.append('mean')
median_row.append('median')
std_row.append('std')
up_quantile_row.append('up_quantile')
down_quantile_row.append('down_quantile')
else:
mean = round(float(np.mean(col)), 2)
mean_row.append(mean)
median = round(float(np.median(col)), 2)
median_row.append(median)
std = round(float(np.std(col)), 2)
std_row.append(std)
up_quantile = round(float(np.quantile(col, 0.25)), 2)
up_quantile_row.append(up_quantile)
down_quantile = round(float(np.quantile(col, 0.75)), 2)
down_quantile_row.append(down_quantile)
table.add_row(mean_row)
table.add_row(up_quantile_row)
table.add_row(median_row)
table.add_row(down_quantile_row)
table.add_row(std_row)
if return_cols is False and return_mean is False:
return table
if return_cols is True:
return table, cols
if return_mean is True:
return table, mean_row, std_row
def draw_heat_map(data, row_labels, col_labels,
pic_dir='pics/paper_pic/head_or_tail', pic_name='all_samples'):
plt.figure(figsize=(8, 2))
sns.set_theme()
ax = sns.heatmap(data=data,
center=0,
annot=True, fmt='.2f',
xticklabels=row_labels,
yticklabels=col_labels)
if not os.path.isdir(pic_dir):
os.mkdir(pic_dir)
plt.tight_layout()
fig = ax.get_figure()
fig.savefig('{}/{}.eps'.format(pic_dir, pic_name), format='eps')
plt.show()
def draw_box_plot(corrs, pic_name, pic_dir, ylim=None, hor=True):
data = {"prompt": [], "corr": []}
for prompt in corrs:
for corr in corrs[prompt]:
data["prompt"].append(prompt)
data["corr"].append(corr)
pd_data = pd.DataFrame(data)
sns.set_theme(style="whitegrid")
if hor is True:
flatui = ["#d6ecfa"]
ax = sns.boxplot(
x="corr", y="prompt",
data=pd_data, orient='h', width=.6,
boxprops={'color': '#404040',
'facecolor': '#d6ecfa'
}
)
else:
ax = sns.boxplot(
x="prompt", y="corr",
data=pd_data, width=.3,
palette="Set2"
)
ax.set_ylabel("")
ax.set_xlabel("")
ax.tick_params(axis='x', labelsize=20)
ax.tick_params(axis='y', labelsize=20)
for line in ax.get_lines():
line.set_color("#404040")
set_plt()
if ylim is not None:
ax.set(ylim=ylim)
if not os.path.isdir(pic_dir):
os.makedirs(pic_dir)
fig = ax.get_figure()
fig.savefig('{}/{}.eps'.format(pic_dir, pic_name), format='eps')
def draw_corr_scatter(data, pic_name, pic_dir, prompt="T_{man}"):
pd_data = pd.DataFrame(data)
pd_data = pd_data[pd_data["prompts"] == prompt]
print(pd_data)
ax = sns.regplot(x="kl", y="precision", data=pd_data)
print("mean: {}".format(pd_data["kl"].mean()))
def draw_context_box_plot(true_p, false_p, obj_true_p, obj_false_p):
data = {"prediction": [], "context": [], "precision": []}
for p in true_p:
data["precision"].append(p)
data["prediction"].append("right")
data["context"].append("mask obj")
for p in false_p:
data["precision"].append(p)
data["prediction"].append("false")
data["context"].append("mask obj")
for p in obj_true_p:
data["precision"].append(p)
data["prediction"].append("right")
data["context"].append("obj only")
for p in obj_false_p:
data["precision"].append(p)
data["prediction"].append("false")
data["context"].append("obj only")
pd_data = | pd.DataFrame(data) | pandas.DataFrame |
from __future__ import absolute_import, print_function
import os
import pandas as pd
import numpy as np
from .BaseStructProtocol import BaseStructProtocol
from codifyComplexes.CodifyComplexException import CodifyComplexException
from computeFeatures.seqStep.seqToolManager import SeqToolManager
AA_CODE_ELEMENTS= SeqToolManager.AA_CODE_ELEMENTS
'''
(feature_name, path_to_dir, columns ). If columns==None, all columns will be used
Structural features must come first as sequential single chains features muy contain more aminoacids
(e.g. non 3D-solved residues)
'''
FEATURES_TO_INCLUDE_CHAIN= [
("psaia", ("structStep/PSAIA/procPSAIA", None)),
("halfSphereExpos", ("structStep/halfSphereExpos", None)),
("dssp", ("structStep/DSSP", [3])),
("al2co", ("seqStep/conservation/al2co",None)),
("winPssms", ("seqStep/conservation/pssms/windowedPSSMs/wSize11", None)),
("winSeq", ("seqStep/slidingWinSeq11", None))
]
FEATURES_TO_INCLUDE_PAIR= [
("corrMut", ("seqStep/conservation/corrMut", None)),
]
class StructProtocol(BaseStructProtocol):
'''
This class implements structural voronoi environment codification
'''
def __init__(self, dataRootPath, cMapPath, prevStepPaths=None, singleChainfeatsToInclude= FEATURES_TO_INCLUDE_CHAIN,
pairfeatsToInclude=FEATURES_TO_INCLUDE_PAIR, verbose=False):
'''
:param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:
computedFeatures/
common/
contactMaps/
seqStep/
conservation/
...
structStep/
PSAIA/
VORONOI/
...
:param cMapPath: str. A path to a dir that contains the contact map of the protein complex
:param prevStepPaths: str or str[]. A path to previous results files directory. If it is None, contactMaps files will be used
to define which residue pairs are in contact. Can also be a str[] if multiple feedback_path's
wanted
'''
BaseStructProtocol.__init__(self, dataRootPath, cMapPath, prevStepPaths,
singleChainfeatsToInclude=FEATURES_TO_INCLUDE_CHAIN,
pairfeatsToInclude= FEATURES_TO_INCLUDE_PAIR, verbose= verbose)
def loadSingleChainFeatures(self, prefixOneChainType, chainType):
'''
Overrides the BaseStructProtocol method to make use of sequence profiles (loaded directly) and structural
neighbourhood aggregation, without computing structural neighbours for the non-central residue features of the sliding window
Loads all features files computed for ligand or receptor chains. Returns a pandas.DataFrame
that contains in each row all features from all files for each amino acid. Just amino acids
that appears in each file will be included. Others will be ruled out (intersection)
:param prefixOneChainType: str. A prefixOneChainType that identifies the receptor or ligand
:param chainType: str. "l" for ligand and "r" for receptor
:return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
one amino acid
Column names are:
'chainId%s', 'resId%s', 'resName%s', [properties] #no defined order for properties
%s is L if chainType=="l" and R if chainType=="r"
'''
#super (BaseStructProtocol,self) is AbstractProtocol
singleChainFeats= super(BaseStructProtocol,self).loadSingleChainFeatures( prefixOneChainType, chainType) #Load with no aggregation
chainType= chainType.upper()
winSize= max([ int(elem.split(".")[-1][:-1]) for elem in singleChainFeats.columns if elem.startswith("pssmWin") ])+1
centralRes= winSize//2
# find variables that will not be considered for structural aggregation: sliding window features of non-central amino acids
selectedSeqEntr= set([ 'informationWin.%d.%d%s'%(i, centralRes, chainType) for i in range(2)])
selectedPssm= set([ 'pssmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])
selectedPsfm= set([ 'psfmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])
selectedWinAA= set([ 'aaWin.0.%d_dummy_%s%s'%(centralRes,letter, chainType) for letter in AA_CODE_ELEMENTS ])
# these variables will be aggregated
centralResCols= selectedSeqEntr.union(selectedPssm).union(selectedPsfm).union(selectedWinAA)
winCols= set([col for col in singleChainFeats.columns if not "ggr" in col and "Win" in col ])
# these variables will not be aggregated
allWinButCentralCols= winCols.difference(centralResCols)
allButWinData= singleChainFeats[ [col for col in singleChainFeats.columns if not col in allWinButCentralCols] ]
winData= singleChainFeats[ list(singleChainFeats.columns[:3])+[col for col in singleChainFeats.columns if col in allWinButCentralCols] ]
# print( list( allButWinData.columns) );raw_input("enter")
singleChainFeats= self.addSingleChainAggregation(allButWinData, chainType)
mergeOn= [ elem%chainType.upper() for elem in ["chainId%s", "resId%s", "resName%s"] ]
singleChainFeats= | pd.merge(singleChainFeats, winData, how='inner', on=mergeOn) | pandas.merge |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import re
import warnings
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import os
import platform
from .condition_fun import *
from .info_value import *
# converting vector (breaks & special_values) to dataframe
def split_vec_todf(vec):
'''
Create a dataframe based on provided vector.
Split the rows that include '%,%' into multiple rows.
Replace 'missing' by np.nan.
Params
------
vec: list
Returns
------
pandas.DataFrame
returns a dataframe with three columns
{'bin_chr':original vec, 'rowid':index of vec, 'value':split vec}
'''
if vec is not None:
vec = [str(i) for i in vec]
a = pd.DataFrame({'bin_chr':vec}).assign(rowid=lambda x:x.index)
b = pd.DataFrame([i.split('%,%') for i in vec], index=vec)\
.stack().replace('missing', np.nan) \
.reset_index(name='value')\
.rename(columns={'level_0':'bin_chr'})[['bin_chr','value']]
# return
return pd.merge(a,b,on='bin_chr')
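# Illustrative example (hypothetical breaks vector; split values stay as strings here):
#   split_vec_todf(['missing', '2%,%3'])
# returns, after the merge on 'bin_chr':
#   bin_chr   rowid  value
#   missing   0      NaN
#   2%,%3     1      '2'
#   2%,%3     1      '3'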
def add_missing_spl_val(dtm, breaks, spl_val):
'''
add missing to spl_val if there is nan in dtm.value and
missing is not specified in breaks and spl_val
Params
------
dtm: melt dataframe
breaks: breaks list
spl_val: special values list
Returns
------
list
returns spl_val list
'''
if dtm.value.isnull().any():
if breaks is None:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
elif any([('missing' in str(i)) for i in breaks]):
spl_val=spl_val
else:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in i) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
# return
return spl_val
# count number of good or bad in y
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
# split dtm into bin_sv and dtm (without speical_values)
def dtm_binning_sv(dtm, breaks, spl_val):
'''
Split the original dtm (melt dataframe) into
binning_sv (binning of special_values) and
a new dtm (without special_values).
Params
------
dtm: melt dataframe
spl_val: special values list
Returns
------
list
returns a list with binning_sv and dtm
'''
spl_val = add_missing_spl_val(dtm, breaks, spl_val)
if spl_val is not None:
# special_values from vector to dataframe
sv_df = split_vec_todf(spl_val)
# value
if is_numeric_dtype(dtm['value']):
sv_df['value'] = sv_df['value'].astype(dtm['value'].dtypes)
# sv_df['bin_chr'] = sv_df['bin_chr'].astype(dtm['value'].dtypes).astype(str)
sv_df['bin_chr'] = np.where(
np.isnan(sv_df['value']), sv_df['bin_chr'],
sv_df['value'].astype(dtm['value'].dtypes).astype(str))
# sv_df = sv_df.assign(value = lambda x: x.value.astype(dtm['value'].dtypes))
# dtm_sv & dtm
dtm_sv = pd.merge(dtm.fillna("missing"), sv_df[['value']].fillna("missing"), how='inner', on='value', right_index=True)
dtm = dtm[~dtm.index.isin(dtm_sv.index)].reset_index() if len(dtm_sv.index) < len(dtm.index) else None
# dtm_sv = dtm.query('value in {}'.format(sv_df['value'].tolist()))
# dtm = dtm.query('value not in {}'.format(sv_df['value'].tolist()))
if dtm_sv.shape[0] == 0:
return {'binning_sv':None, 'dtm':dtm}
# binning_sv
binning_sv = pd.merge(
dtm_sv.fillna('missing').groupby(['variable','value'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'}),
sv_df.fillna('missing'),
on='value'
).groupby(['variable', 'rowid', 'bin_chr']).agg({'bad':sum,'good':sum})\
.reset_index().rename(columns={'bin_chr':'bin'})\
.drop('rowid', axis=1)
else:
binning_sv = None
# return
return {'binning_sv':binning_sv, 'dtm':dtm}
# check empty bins for numeric variable
def check_empty_bins(dtm, binning):
# check empty bins
bin_list = np.unique(dtm.bin.astype(str)).tolist()
if 'nan' in bin_list:
bin_list.remove('nan')
binleft = set([re.match(r'\[(.+),(.+)\)', i).group(1) for i in bin_list]).difference(set(['-inf', 'inf']))
binright = set([re.match(r'\[(.+),(.+)\)', i).group(2) for i in bin_list]).difference(set(['-inf', 'inf']))
if binleft != binright:
bstbrks = sorted(list(map(float, ['-inf'] + list(binright) + ['inf'])))
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# warnings.warn("The break points are modified into '[{}]'. There are empty bins based on the provided break points.".format(','.join(binright)))
# binning
# dtm['bin'] = dtm['bin'].astype(str)
# return
return binning
# required in woebin2 # return binning if breaks provided
#' @import data.table
def woebin2_breaks(dtm, breaks, spl_val):
'''
get binning if breaks is provided
Params
------
dtm: melt dataframe
breaks: breaks list
spl_val: special values list
Returns
------
DataFrame
returns a binning dataframe
'''
# breaks from vector to dataframe
bk_df = split_vec_todf(breaks)
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'binning':None}
# binning
if is_numeric_dtype(dtm['value']):
# best breaks
bstbrks = ['-inf'] + list(set(bk_df.value.tolist()).difference(set([np.nan, '-inf', 'inf', 'Inf', '-Inf']))) + ['inf']
bstbrks = sorted(list(map(float, bstbrks)))
# cut
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
dtm['bin'] = dtm['bin'].astype(str)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# check empty bins for numeric variable
binning = check_empty_bins(dtm, binning)
# sort bin
binning = pd.merge(
binning.assign(value=lambda x: [float(re.search(r"^\[(.*),(.*)\)", i).group(2)) if i != 'nan' else np.nan for i in binning['bin']] ),
bk_df.assign(value=lambda x: x.value.astype(float)),
how='left',on='value'
).sort_values(by="rowid").reset_index(drop=True)
# merge binning and bk_df if nan isin value
if bk_df['value'].isnull().any():
binning = binning.assign(bin=lambda x: [i if i != 'nan' else 'missing' for i in x['bin']])\
.fillna('missing').groupby(['variable','rowid'])\
.agg({'bin':lambda x: '%,%'.join(x), 'good':sum, 'bad':sum})\
.reset_index()
else:
# merge binning with bk_df
binning = pd.merge(
dtm,
bk_df.assign(bin=lambda x: x.bin_chr),
how='left', on='value'
).fillna('missing').groupby(['variable', 'rowid', 'bin'])['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.reset_index().drop('rowid', axis=1)
# return
return {'binning_sv':binning_sv, 'binning':binning}
# required in woebin2_init_bin # return pretty breakpoints
def pretty(low, high, n):
'''
pretty breakpoints, the same as pretty function in R
Params
------
low: minimal value
high: maximal value
n: number of intervals
Returns
------
numpy.ndarray
returns a breakpoints array
'''
# nicenumber
def nicenumber(x):
exp = np.trunc(np.log10(abs(x)))
f = abs(x) / 10**exp
if f < 1.5:
nf = 1.
elif f < 3.:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
return np.sign(x) * nf * 10.**exp
# pretty breakpoints
d = abs(nicenumber((high-low)/(n-1)))
miny = np.floor(low / d) * d
maxy = np.ceil (high / d) * d
return np.arange(miny, maxy+0.5*d, d)
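# Worked example: pretty(0, 10, 5) gives d = nicenumber(10/4) = nicenumber(2.5) = 2,
# so the returned breakpoints are array([ 0.,  2.,  4.,  6.,  8., 10.]) -- evenly
# spaced "nice" numbers covering [low, high], mirroring R's pretty().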
# required in woebin2 # return initial binning
def woebin2_init_bin(dtm, init_count_distr, breaks, spl_val):
'''
initial binning
Params
------
dtm: melt dataframe
init_count_distr: the minimal percentage in the fine binning process
breaks: breaks list
spl_val: special values list
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'initial_binning':None}
# binning
if is_numeric_dtype(dtm['value']): # numeric variable
xvalue = dtm['value'].astype(float)
# breaks vector & outlier
iq = xvalue.quantile([0.25, 0.5, 0.75])
iqr = iq[0.75] - iq[0.25]
xvalue_rm_outlier = xvalue if iqr == 0 else xvalue[(xvalue >= iq[0.25]-3*iqr) & (xvalue <= iq[0.75]+3*iqr)]
# number of initial binning
n = np.trunc(1/init_count_distr)
len_uniq_x = len(np.unique(xvalue_rm_outlier))
if len_uniq_x < n: n = len_uniq_x
# initial breaks
brk = np.unique(xvalue_rm_outlier) if len_uniq_x < 10 else pretty(min(xvalue_rm_outlier), max(xvalue_rm_outlier), n)
brk = list(filter(lambda x: x>np.nanmin(xvalue) and x<np.nanmax(xvalue), brk))
brk = [float('-inf')] + sorted(brk) + [float('inf')]
# initial binning datatable
# cut
labels = ['[{},{})'.format(brk[i], brk[i+1]) for i in range(len(brk)-1)]
dtm.loc[:,'bin'] = | pd.cut(dtm['value'], brk, right=False, labels=labels) | pandas.cut |
"""
data_curation_functions.py
Extract Kevin's functions for curation of public datasets
Modify them to match Jonathan's curation methods in notebook
01/30/2020
"""
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import seaborn as sns
import pdb
from atomsci.ddm.utils.struct_utils import base_smiles_from_smiles
import atomsci.ddm.utils.datastore_functions as dsf
#from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.utils import curate_data as curate
import atomsci.ddm.utils.struct_utils as struct_utils
import atomsci.ddm.utils.curate_data as curate_data, imp
def set_data_root(dir):
global data_root, data_dirs
data_root = dir
#data_dirs = dict(ChEMBL = '%s/ChEMBL' % data_root, DTC = '%s/DTC' % data_root,
# Excape = '%s/Excape' % data_root)
data_dirs = dict(DTC = '%s/dtc' % data_root,
Excape = '%s/excape' % data_root)
log_var_map = {
'IC50': 'pIC50',
'AC50': 'pIC50',
'Solubility': 'logSolubility',
'CL': 'logCL'
}
pub_dsets = dict(
CYP2D6 = dict(IC50='cyp2d6'),
CYP3A4 = dict(IC50='cyp3a4'),
JAK1 = dict(IC50="jak1"),
JAK2 = dict(IC50="jak2"),
JAK3 = dict(IC50="jak3"),
)
# ----------------------------------------------------------------------------------------------------------------------
# Generic functions for all datasets
# ----------------------------------------------------------------------------------------------------------------------
# Note: Functions freq_table and labeled_freq_table have been moved to ddm.utils.curate_data module.
# ----------------------------------------------------------------------------------------------------------------------
def standardize_relations(dset_df, db='DTC'):
"""
Standardize the censoring operators to =, < or >, and remove any rows whose operators
don't map to a standard one.
"""
relation_cols = dict(ChEMBL='Standard Relation', DTC='standard_relation')
rel_col = relation_cols[db]
dset_df[rel_col].fillna('=', inplace=True)
ops = dset_df[rel_col].values
if db == 'ChEMBL':
# Remove annoying quotes around operators
ops = [op.lstrip("'").rstrip("'") for op in ops]
op_dict = {
">": ">",
">=": ">",
"<": "<",
"<=": "<",
"=": "="
}
ops = np.array([op_dict.get(op, "@") for op in ops])
dset_df[rel_col] = ops
dset_df = dset_df[dset_df[rel_col] != "@"]
return dset_df
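# Usage sketch (hypothetical frame): operators are normalised before filtering, so
# '>=' collapses to '>' and anything unmapped (e.g. '~') is dropped with its row.
#   df = pd.DataFrame({'standard_relation': ['>=', '=', '~'], 'standard_value': [1, 2, 3]})
#   standardize_relations(df, db='DTC')['standard_relation'].tolist()  # ['>', '=']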
# ----------------------------------------------------------------------------------------------------------------------
# DTC-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
"""
Upload a raw dataset to the datastore from the given data frame.
Returns the datastore OID of the uploaded dataset.
"""
def upload_file_dtc_raw_data(dset_name, title, description, tags,
functional_area,
target, target_type, activity, assay_category,file_path,
data_origin='journal', species='human',
force_update=False):
bucket = 'public'
filename = '%s.csv' % dset_name
dataset_key = 'dskey_' + filename
kv = { 'file_category': 'experimental',
'activity': activity,
'assay_category':assay_category,
'assay_endpoint' : 'multiple values',
'curation_level': 'raw',
'data_origin' : data_origin,
'functional_area' : functional_area,
'matrix' : 'multiple values',
'journal_doi' : 'https://doi.org/10.1016/j.chembiol.2017.11.009',
'sample_type' : 'in_vitro',
'species' : species,
'target' : target,
'target_type' : target_type,
'id_col' : 'compound_id'
}
#uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
ds_client = dsf.config_client()
if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
#uploaded_file = dsf.upload_df_to_DS(dset_df, bucket, filename=filename, title=title,
# description=description,
# tags=tags, key_values=kv, client=None, dataset_key=dataset_key,
# override_check=True, return_metadata=True)
uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
print("Uploaded raw dataset with key %s" % dataset_key)
else:
uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
raw_dset_oid = uploaded_file['dataset_oid']
return raw_dset_oid
'''
# ----------------------------------------------------------------------------------------------------------------------
def get_dtc_jak_smiles():
"""
Use PubChem REST API to download SMILES strings for InChi strings in DTC JAK123 data table
"""
jak_file = "%s/jak123_dtc.csv" % data_dirs['DTC']
dset_df = pd.read_csv(jak_file, index_col=False)
jak_dtc_df = jak_dtc_df[~jak_dtc_df.standard_inchi_key.isna()]
inchi_keys = sorted(set(jak_dtc_df.standard_inchi_key.values))
smiles_df, fail_list, discard_list = pu.download_smiles(inchi_keys)
smiles_df.to_csv('%s/jak123_inchi_smiles.csv' % data_dirs['DTC'], index=False)
# ----------------------------------------------------------------------------------------------------------------------
'''
def filter_dtc_data(orig_df,geneNames):
"""
Extract JAK1, 2 and 3 datasets from Drug Target Commons database, filtered for data usability.
"""
# filter criteria:
# gene_names == JAK1 | JAK2 | JAK3
# InChi key not missing
# standard_type IC50
# units NM
# standard_relation mappable to =, < or >
# wildtype_or_mutant != 'mutated'
# valid SMILES
# maps to valid RDKit base SMILES
# standard_value not missing
# pIC50 > 3
#--------------------------------------------------
# Filter dataset on existing columns
dset_df = orig_df[orig_df.gene_names.isin(geneNames) &
~(orig_df.standard_inchi_key.isna()) &
(orig_df.standard_type == 'IC50') &
(orig_df.standard_units == 'NM') &
~orig_df.standard_value.isna() &
~orig_df.compound_id.isna() &
(orig_df.wildtype_or_mutant != 'mutated') ]
return dset_df
def ic50topic50(x) :
return -np.log10((x/1000000000.0))
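# Worked example: an IC50 of 1000 nM converts to
# -log10(1000 / 1e9) = -log10(1e-6) = 6.0, i.e. pIC50 = 6.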
def down_select(df,kv_lst) :
for k,v in kv_lst :
df=df[df[k]==v]
return df
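# Usage sketch (column/value pairs as used below): down_select applies each
# (column, value) equality filter in turn, e.g.
#   down_select(nm_df, [('gene_names', 'JAK1'), ('standard_type', 'IC50')])
# keeps only the JAK1 IC50 rows.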
def get_smiles_dtc_data(nm_df,targ_lst,save_smiles_df):
save_df={}
for targ in targ_lst :
lst1= [ ('gene_names',targ),('standard_type','IC50'),('standard_relation','=') ]
lst1_tmp= [ ('gene_names',targ),('standard_type','IC50')]
jak1_df=down_select(nm_df,lst1)
jak1_df_tmp=down_select(nm_df,lst1_tmp)
print(targ,"distinct compounds = only",jak1_df['standard_inchi_key'].nunique())
print(targ,"distinct compounds <,>,=",jak1_df_tmp['standard_inchi_key'].nunique())
#to ignore censored data
#save_df[targ]=jak1_df
#to include censored data
save_df[targ]=jak1_df_tmp
prev_targ=targ_lst[0]
shared_inchi_keys=save_df[prev_targ]['standard_inchi_key']
for it in range(1,len(targ_lst),1) :
curr_targ=targ_lst[it]
df=save_df[curr_targ]
shared_inchi_keys=df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
print("num shared compounds",shared_inchi_keys.nunique())
lst=[]
for targ in targ_lst :
df=save_df[targ]
#print(aurka_df.shape,aurkb_df.shape, shared_inchi_keys.shape)
lst.append(df[df['standard_inchi_key'].isin(shared_inchi_keys)])
shared_df=pd.concat(lst)
# Add pIC50 values
print('Add pIC50 values.')
shared_df['PIC50']=shared_df['standard_value'].apply(ic50topic50)
# Merge in SMILES strings
print('Merge in SMILES strings.')
smiles_lst=[]
for targ in targ_lst :
df=save_df[targ]
df['PIC50']=df['standard_value'].apply(ic50topic50)
smiles_df=df.merge(save_smiles_df,on='standard_inchi_key',suffixes=('_'+targ,'_'))
#the file puts the SMILES string in quotes, which need to be removed
smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
smiles_df['rdkit_smiles']=smiles_df['smiles'].apply(struct_utils.base_smiles_from_smiles)
smiles_df['smiles']=smiles_df['smiles'].str.replace('"','')
print(smiles_df.shape)
print(smiles_df['standard_inchi_key'].nunique())
smiles_lst.append(smiles_df)
return smiles_lst, shared_inchi_keys
def get_smiles_4dtc_data(nm_df,targ_lst,save_smiles_df):
save_df={}
description_str = ""
for targ in targ_lst :
lst1= [ ('gene_names',targ),('standard_type','IC50'),('standard_relation','=') ]
lst1_tmp= [ ('gene_names',targ),('standard_type','IC50')]
jak1_df=down_select(nm_df,lst1)
jak1_df_tmp=down_select(nm_df,lst1_tmp)
print(targ,"distinct compounds = only",jak1_df['standard_inchi_key'].nunique())
print(targ,"distinct compounds <,>,=",jak1_df_tmp['standard_inchi_key'].nunique())
description = '''
# '''+targ+" distinct compounds = only: "+str(jak1_df['standard_inchi_key'].nunique())+'''
# '''+targ+" distinct compounds <,>,=: "+str(jak1_df_tmp['standard_inchi_key'].nunique())
description_str += description
#to ignore censored data
#save_df[targ]=jak1_df
#to include censored data
save_df[targ]=jak1_df_tmp
prev_targ=targ_lst[0]
shared_inchi_keys=save_df[prev_targ]['standard_inchi_key']
for it in range(1,len(targ_lst),1) :
curr_targ=targ_lst[it]
df=save_df[curr_targ]
shared_inchi_keys=df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
print("num shared compounds",shared_inchi_keys.nunique())
lst=[]
for targ in targ_lst :
df=save_df[targ]
#print(aurka_df.shape,aurkb_df.shape, shared_inchi_keys.shape)
lst.append(df[df['standard_inchi_key'].isin(shared_inchi_keys)])
shared_df= | pd.concat(lst) | pandas.concat |
# -*- coding: utf-8 -*-
import sys
sys.path.append('../train_code')
import numpy as np
import pandas as pd
from utils.utils import *
from train_config import args
from sklearn.neighbors import NearestNeighbors
import joblib
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def f(x):
x = | pd.Series(x) | pandas.Series |
import os
import pandas as pd
import numpy as np
import logging
import wget
import time
import pickle
from src.features import preset
from src.features import featurizer
from src.data.utils import LOG
from matminer.data_retrieval.retrieve_MP import MPDataRetrieval
from tqdm import tqdm
from pathlib import Path
from src.data.get_data_MP import data_MP
import dotenv
def featurize_by_material_id(material_ids: np.array,
featurizerObject: featurizer.extendedMODFeaturizer,
MAPI_KEY: str,
writeToFile: bool = True) -> pd.DataFrame:
""" Run all of the preset featurizers on the input dataframe.
Arguments:
df: the input dataframe with a `"structure"` column
containing `pymatgen.Structure` objects.
Returns:
The featurized DataFrame.
"""
def apply_featurizers(criterion, properties, mpdr, featurizerObject):
LOG.info("Downloading dos and bandstructure objects..")
timeDownloadStart = time.time()
df_portion = mpdr.get_dataframe(criteria=criterion, properties=properties)
timeDownloadEnd = time.time()
LOG.info(df_portion)
df_time, df_portion = featurizerObject.featurize(df_portion)
df_time["download_objects"] = [timeDownloadEnd-timeDownloadStart]
return df_time, df_portion
properties = ["material_id","full_formula", "bandstructure", "dos", "structure"]
mpdr = MPDataRetrieval(MAPI_KEY)
steps = 1
leftover = len(material_ids)%steps
df = pd.DataFrame({})
df_timers = pd.DataFrame({})
for i in tqdm(range(0,len(material_ids),steps)):
portionReturned = True
if not (i+steps > len(material_ids)):
LOG.info(list(material_ids[i:i+steps]))
criteria = {"task_id":{"$in":list(material_ids[i:i+steps])}}
while (portionReturned):
try:
df_time, df_portion = apply_featurizers(criteria, properties, mpdr, featurizerObject)
portionReturned = False
except:
LOG.info("Except - try again.")
# Add ID to recognize afterwards
df_portion["material_id"] = material_ids[i:i+steps]
df = | pd.concat([df,df_portion]) | pandas.concat |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
        # the regression only reproduces if there is a membership check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
        assertRaisesRegexp(TypeError, "iterable", first.difference, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
        for x in [0.0, 2.0, 3.0]:
            self.assertIn(x, result)
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
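        # 'ffill' is an alias for 'pad' (and 'bfill' for 'backfill'), so the
        # aliased indexers below should match the originals exactly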
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
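            # slice_locs returns integer (start, stop) positions for
            # label-based slicing; e.g. on [0, 1, 2, 5, 6, 7, 9, 10],
            # slice_locs(3, 8) -> (3, 6), covering the labels 5, 6 and 7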
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
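        # with duplicated labels, slice_locs should span every occurrence of
        # the endpoint labels, not just the first one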
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
        aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'),
                          (2, 'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
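        # any NaN-like value (np.nan, float('nan'), even pd.NaT) is expected
        # to match the NaN entry of a float index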
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
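        # the outer join mixes integer and datetime labels, so the sort order
        # of the result is undefined - this is expected to surface as a
        # RuntimeWarning rather than an error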
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
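        # empty list/ndarray targets (even an int64-typed empty array) should
        # leave the original object dtype untouched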
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
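        # an empty pandas Index, by contrast, does carry a dtype, so the
        # result should take on the target's dtype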
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
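        # didx holds the element-wise squares; it is the expected result of
        # idx * idx and of multiplying idx by an equal-length array or Series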
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
        # a scalar should raise TypeError
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
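        # -1 marks targets with no match in self.index (here the odd numbers,
        # since self.index only holds the even numbers 0..18)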
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
        # not monotonic
        # the outer join result should still come back sorted
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
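        # elidx/eridx map each position of the expected result back into
        # self.index / other, with -1 where the label exists only on the
        # other side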
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
        self.assertTrue(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
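        # every occurrence of a duplicated label matches every other
        # occurrence, so the self-join yields 2 * 2 rows per label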
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5,dtype='int64')**2)
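        # unlike Numeric.test_numeric_compat, idx here is a TimedeltaIndex, so
        # idx * idx is not defined and is asserted to raise further down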
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
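        # equivalent to MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
        #                                       ('bar', 'one'), ('baz', 'two'),
        #                                       ('qux', 'one'), ('qux', 'two')],
        #                                      names=['first', 'second'])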
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
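        # labels are stored with the smallest integer dtype that can hold the
        # codes, so the larger levels below should widen to int16/int32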
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # passing scalar data shouldn't raise a cryptic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # passing scalar data shouldn't raise a cryptic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # passing scalar data shouldn't raise a cryptic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
        # the non-inplace set_labels call shouldn't change _tuples
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
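        # with copy=True the MultiIndex should own copies of levels/labels, so
        # mutating the original arrays below must not leak into the index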
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
        # levels should be (at least) shallow-copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
        # labels don't matter which way they are copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
        # names don't matter which way they are copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
    [[1, 2], ['a', 'b'],
     date_range('20130101', periods=3, tz='US/Eastern')],
    names=['one', 'two', 'three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
self.assertRaises(KeyError, index.get_loc, (1, 1))
self.assertEqual(index.get_loc((2, 0)), slice(3, 5))
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
self.assertEqual(result, expected)
# self.assertRaises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert(rs == xp)
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),
Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
self.assertEqual(loc, expected)
self.assertTrue(new_index.equals(exp_index))
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
self.assertEqual(loc, expected)
self.assertIsNone(new_index)
self.assertRaises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)],
labels=[np.array([0, 0, 0, 0]),
np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
self.assertEqual(result, expected)
self.assertTrue(new_index.equals(index.droplevel(0)))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
(1, 3))
assertRaisesRegexp(TypeError, '^Level type mismatch', idx.slice_locs,
df.index[5] + timedelta(seconds=30), (5, 2))
df = | tm.makeCustomDataframe(5, 5) | pandas.util.testing.makeCustomDataframe |
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from long_duration_mdk import ( # calc_change_in_reserve,
calc_benefit_reserve,
calc_continuance,
calc_discount,
calc_interpolation,
calc_pv,
calc_pvfnb,
)
def test_calc_continuance():
mortality_rate = pd.Series([0.01, 0.015, 0.02])
lapse_rate = pd.Series([0.2, 0.1, 0.05])
lives_ed = calc_continuance(mortality_rate, lapse_rate)
assert_series_equal(lives_ed, ((1 - mortality_rate) * (1 - lapse_rate)).cumprod())
lives_bd = lives_ed.shift(1, fill_value=1)
lives_md = calc_continuance(mortality_rate / 2, starting_duration=lives_bd)
assert_series_equal(lives_md, lives_bd * (1 - mortality_rate / 2))
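# Illustrative arithmetic for the fixtures above (assuming in-force counts compound
# multiplicatively): (1-0.01)*(1-0.2) = 0.792, then 0.792*(1-0.015)*(1-0.1) = 0.702108,
# then 0.702108*(1-0.02)*(1-0.05) ~= 0.6537 at the third duration.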
def test_calc_discount():
interest_rate = pd.Series([0.03, 0.04, 0.05])
v_ed = calc_discount(interest_rate)
assert_series_equal(v_ed, pd.Series([0.970874, 0.933532, 0.889079]))
v_md = calc_discount(interest_rate, t_adj=0.5)
assert_series_equal(v_md, pd.Series([0.985329, 0.952020, 0.911034]))
v_bd = calc_discount(interest_rate, t_adj=0)
assert_series_equal(v_bd, pd.Series([1, 0.970874, 0.933532]))
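# Illustrative cross-check of the discount fixtures above (a hypothetical helper, assuming
# calc_discount compounds period by period; the library's implementation may differ):
def test_calc_discount_fixture_arithmetic():
    interest_rate = pd.Series([0.03, 0.04, 0.05])
    # end-of-period factors are the running product of 1/(1+i)
    end_of_period = (1 / (1 + interest_rate)).cumprod().round(6)
    assert_series_equal(end_of_period, pd.Series([0.970874, 0.933532, 0.889079]))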
def test_calc_interpolation():
# test nonzero values
val_0 = pd.Series([1, 2, 3])
val_1 = pd.Series([2, 3, 4])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([1.5, 2.5, 3.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([1.414214, 2.449490, 3.464102]))
# test one zero value
val_0 = pd.Series([0, 1, 2])
val_1 = pd.Series([1, 2, 3])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([0.5, 1.5, 2.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([0.414214, 1.449490, 2.464102]))
# test two zero values
val_0 = pd.Series([0, 0, 1])
val_1 = pd.Series([0, 1, 2])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([0, 0.5, 1.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([0, 0.414214, 1.449490]))
# test value less than zero
val_0 = pd.Series([-1, 0, 1])
val_1 = pd.Series([0, 1, 2])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([-0.5, 0.5, 1.5]))
# log-linear raises ValueError
pytest.raises(ValueError, calc_interpolation, val_0, val_1, wt_0, method="log-linear")
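# One way to reproduce the log-linear fixtures above (an inference; the library may implement
# this differently): interpolate on log(value + 1) when a zero is present and undo the shift,
# e.g. sqrt((0 + 1) * (1 + 1)) - 1 = 0.414214; for strictly positive inputs with wt_0 = 0.5
# the result is just the geometric mean, e.g. sqrt(1 * 2) = 1.414214.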
def test_calc_pv():
assert_series_equal(calc_pv(pd.Series([3, 2, 1])), pd.Series([6, 3, 1]))
def test_calc_pvfnb():
pvfb = pd.Series([6, 5, 3])
pvfp = pd.Series([9, 6, 3])
pvfnb = calc_pvfnb(pvfb=pvfb, pvfp=pvfp, net_benefit_method="NLP")
assert_series_equal(pvfnb, pd.Series([6.0, 4.0, 2.0]))
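# Illustrative cross-check (hypothetical helper): calc_pv above behaves like a reversed
# cumulative sum, and the NLP pvfnb scales pvfp by the ratio pvfb[0]/pvfp[0] = 6/9:
def test_pv_and_pvfnb_fixture_arithmetic():
    assert_series_equal(pd.Series([3, 2, 1])[::-1].cumsum()[::-1], pd.Series([6, 3, 1]))
    pvfb, pvfp = pd.Series([6, 5, 3]), pd.Series([9, 6, 3])
    assert_series_equal(pvfp * (pvfb[0] / pvfp[0]), pd.Series([6.0, 4.0, 2.0]))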
def test_calc_benefit_reserve():
pvfb = pd.Series([6, 5, 3])
pvfp = pd.Series([9, 6, 3])
pvfnb = calc_pvfnb(pvfb=pvfb, pvfp=pvfp, net_benefit_method="NLP")
lives = pd.Series([0.95, 0.9, 0.8])
discount = | pd.Series([0.95, 0.9, 0.85]) | pandas.Series |
"""
@FileName: make_csv.py
@Description: Implement make_csv
@Author: Ryuk
@CreateDate: 2022/01/10
@LastEditTime: 2022/01/10
@LastEditors: Please set LastEditors
@Version: v0.1
"""
import glob
import pandas as pd
import argparse
from sklearn.utils import shuffle
parser = argparse.ArgumentParser()
parser.add_argument('--target', '-t', type=str, default="./dataset/target", help="target folder")
parser.add_argument('--background', '-b', type=str, default="./dataset/background", help="background folder")
parser.add_argument('--csv', '-c', type=str, default="./dataset/data.csv", help="output csv")
def make_csv(target_folder, background_folder):
target_pattern = target_folder + "/*.wav"
background_pattern = background_folder + "/*.wav"
target_list = glob.glob(target_pattern)
target_label = [1] * len(target_list)
background_list = glob.glob(background_pattern)
background_label = [0] * len(background_list)
target_df = | pd.DataFrame({"path": target_list, "label":target_label}) | pandas.DataFrame |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import pulp
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
#import io
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
#from flask import Flask, render_template, request, redirect, url_for, session, g
from sklearn.linear_model import LogisticRegression
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
#from sqlalchemy import create_engine
from collections import defaultdict
from sklearn import linear_model
import statsmodels.api as sm
import scipy.stats as st
import pandas as pd
import numpy as np
from pulp import *
import pymysql
import math
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[1]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
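# Descriptive note: the OLS fit above estimates a linear demand model of the form
#   qty ~ intercept + b1*(own price - competitor price) + b2*promo1 + b3*promo2 + b4*log(week index)
# and the fitted coefficients are stored in globals so the /maxm optimiser can reuse them.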
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
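# Sketch of the program being solved below: choose prices p_t to maximise revenue
# sum(p_t * demand(p_t)) -- written as minimising its negative in objective() -- subject to
# total forecast demand not exceeding the starting inventory s_0 (constraint_1) and prices
# staying non-negative and within [0, price_max] (constraint_2 plus the SLSQP bounds).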
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
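# dist() is the haversine great-circle distance: R = 6373.0 approximates the Earth's radius
# in km, so the function returns kilometres between the two coordinates times the per-km cost.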
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata) #return cost table for the customers and factories
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'model' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
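# Model recap: production[f, c] is the integer quantity shipped from factory f to customer c,
# factory_status[f] is a binary open/close flag that switches on the fixed cost and capacity,
# and cap_slack[c] absorbs unmet demand at a heavy penalty (5,000,000 per unit) so the model
# stays feasible even when total capacity falls short of demand.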
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
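# Note: this variant divides by y_pred; the textbook MAPE divides by y_true. For example,
# y_true=100, y_pred=80 gives |100-80|/80*100 = 25.0 here, versus 20.0 with y_true.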
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
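# Note: despite the "moving average" label, mavg() below fits ARIMA(0,0,1) -- a first-order
# moving-average (MA(1)) model per column -- rather than a rolling-mean smoother.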
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run a regression for each attribute column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum= | pd.concat([ss]) | pandas.concat |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Jul 22, 2018
"""
import unittest
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from causallib.estimation import IPW
class TestIPW(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Data:
X, a = make_classification(n_features=1, n_informative=1, n_redundant=0, n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0, class_sep=10.0)
cls.data_r_100 = {"X": pd.DataFrame(X), "a": pd.Series(a)}
X, a = make_classification(n_features=1, n_informative=1, n_redundant=0, n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.2, class_sep=10.0)
cls.data_r_80 = {"X": pd.DataFrame(X), "a": pd.Series(a)}
# Data that maps x=0->a=0 and x=1->a=1:
X = pd.Series([0] * 50 + [1] * 50)
cls.data_cat_r_100 = {"X": X.to_frame(), "a": X}
# Data that maps x=0->a=0 and x=1->a=1, but 10% of x=0->a=1 and 10% of x=1->a=0:
X = pd.Series([0] * 40 + [1] * 10 + [1] * 40 + [0] * 10).to_frame()
a = pd.Series([0] * 50 + [1] * 50)
cls.data_cat_r_80 = {"X": X, "a": a}
# Avoids regularization of the model:
cls.estimator = IPW(LogisticRegression(C=1e6, solver='lbfgs'), clip_min=0.05, clip_max=0.95,
use_stabilized=False)
def setUp(self):
self.estimator.fit(self.data_r_100["X"], self.data_r_100["a"])
def test_is_fitted(self):
self.assertTrue(hasattr(self.estimator.learner, "coef_"))
def test_weight_matrix_vector_matching(self):
a = self.data_r_100["a"]
p_vec = self.estimator.compute_weights(self.data_r_100["X"], a)
p_mat = self.estimator.compute_weight_matrix(self.data_r_100["X"], a)
self.assertEqual(p_vec.size, p_mat.shape[0])
for i in range(a.shape[0]):
self.assertAlmostEqual(p_mat.loc[i, a[i]], p_vec[i])
def test_weight_sizes(self):
a = self.data_r_100["a"]
with self.subTest("Weight vector size"):
p = self.estimator.compute_weights(self.data_r_100["X"], a)
self.assertEqual(len(p.shape), 1) # vector has no second axis
self.assertEqual(p.shape[0], a.shape[0])
with self.subTest("Weight matrix size"):
p = self.estimator.compute_weight_matrix(self.data_r_100["X"], a)
self.assertEqual(len(p.shape), 2) # Matrix has two dimensions
self.assertEqual(p.shape[0], a.shape[0])
self.assertEqual(p.shape[1], np.unique(a).size)
def ensure_truncation(self, test_weights):
with self.subTest("Estimator initialization parameters"):
p = self.estimator.compute_propensity(self.data_r_80["X"], self.data_r_80["a"])
if test_weights:
p = self.estimator.compute_weights(self.data_r_80["X"], self.data_r_80["a"]).pow(-1)
self.assertAlmostEqual(p.min(), 0.05)
self.assertAlmostEqual(p.max(), 1 - 0.05)
with self.subTest("Overwrite parameters in compute_weights"):
p = self.estimator.compute_propensity(self.data_r_80["X"], self.data_r_80["a"], clip_min=0.1, clip_max = 0.9)
if test_weights:
p = self.estimator.compute_weights(self.data_r_80["X"], self.data_r_80["a"], clip_min=0.1, clip_max=0.9).pow(-1)
self.assertAlmostEqual(p.min(), 0.1)
self.assertAlmostEqual(p.max(), 1 - 0.1)
with self.subTest("Test asymmetric clipping"):
p = self.estimator.compute_propensity(self.data_r_80["X"], self.data_r_80["a"], clip_min=0.2,
clip_max=0.9)
if test_weights:
p = self.estimator.compute_weights(self.data_r_80["X"], self.data_r_80["a"], clip_min=0.2,
clip_max=0.9).pow(-1)
self.assertAlmostEqual(p.min(), 0.2)
self.assertAlmostEqual(p.max(), 0.9)
with self.subTest("Test calculation of fraction of clipped observations"):
probabilities = | pd.DataFrame() | pandas.DataFrame |
from copy import Error
import os
from typing import Type
from ase.parallel import paropen, parprint, world
from ase.db import connect
from ase.io import read
from glob import glob
import numpy as np
from gpaw import restart
import BASIC.optimizer as opt
import sys
from ase.constraints import FixAtoms,FixedLine
import pandas as pd
from BASIC.utils import detect_cluster
def pbc_checker(slab):
angles_arg=[angle != 90.0000 for angle in np.round(slab.cell.angles(),decimals=4)[:2]]
if np.any(angles_arg):
slab.pbc=[1,1,1]
else:
slab.pbc=[1,1,0]
# def detect_cluster(slab,tol=0.1):
# n=len(slab)
# dist_matrix=np.zeros((n, n))
# slab_c=np.sort(slab.get_positions()[:,2])
# for i, j in itertools.combinations(list(range(n)), 2):
# if i != j:
# cdist = np.abs(slab_c[i] - slab_c[j])
# dist_matrix[i, j] = cdist
# dist_matrix[j, i] = cdist
# condensed_m = squareform(dist_matrix)
# z = linkage(condensed_m)
# clusters = fcluster(z, tol, criterion="distance")
# return slab_c,list(clusters)
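# The commented-out block above sketches how detect_cluster groups slab atoms into
# layers by hierarchically clustering their z-coordinates; the version actually used
# is imported from BASIC.utils. The helper below is an illustrative, self-contained
# sketch of that idea (not part of the original module) that works on a plain array
# of z-coordinates instead of an ase Atoms object.
def _detect_layers_sketch(z_coords, tol=0.1):
    import itertools
    from scipy.cluster.hierarchy import fcluster, linkage
    from scipy.spatial.distance import squareform
    z_sorted = np.sort(np.asarray(z_coords, dtype=float))
    n = len(z_sorted)
    dist_matrix = np.zeros((n, n))
    for i, j in itertools.combinations(range(n), 2):
        # pairwise separation along z; atoms closer than tol end up in the same layer
        cdist = abs(z_sorted[i] - z_sorted[j])
        dist_matrix[i, j] = cdist
        dist_matrix[j, i] = cdist
    condensed_m = squareform(dist_matrix)
    z_link = linkage(condensed_m)
    clusters = fcluster(z_link, tol, criterion="distance")
    return z_sorted, list(clusters)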
def apply_magmom(opt_slab_magmom,ads_slab,adatom=1):
if adatom == 1:
magmom_ls=np.append(opt_slab_magmom,0)
elif adatom == 2:
magmom_ls=np.append(opt_slab_magmom,0)
magmom_ls=np.append(magmom_ls,0)
ads_slab.set_initial_magnetic_moments(magmom_ls)
return ads_slab
def get_clean_slab(element,
miller_index,
report_location,
target_dir,
size,
fix_layer,
solver_fmax,
solver_maxstep,
gpaw_calc):
f = paropen(report_location,'a')
parprint('Start clean slab calculation: ', file=f)
if size != '1x1':
clean_slab_gpw_path=target_dir+'/clean_slab/slab.gpw'
if os.path.isfile(clean_slab_gpw_path):
opt_slab, pre_calc = restart(clean_slab_gpw_path)
pre_kpts=list(pre_calc.__dict__['parameters']['kpts'])
set_kpts=list(gpaw_calc.__dict__['parameters']['kpts'])
if pre_kpts == set_kpts:
parprint('\t'+size+' clean slab is pre-calculated with kpts matched.',file=f)
else:
parprint('\t'+size+' clean slab pre-calculated has different kpts. Clean slab needs to re-calculate.', file=f)
parprint('\t'+'Calculating '+size+' clean slab...',file=f)
clean_slab=read(target_dir+'/clean_slab/input.traj')
opt_slab=clean_slab_calculator(clean_slab,fix_layer,gpaw_calc,target_dir,solver_fmax,solver_maxstep)
else:
parprint('\t'+size+' clean slab is not pre-calculated.',file=f)
parprint('\t'+'Calculating '+size+' clean slab...',file=f)
interm_gpw=target_dir+'/clean_slab/slab_interm.gpw'
if os.path.isfile(interm_gpw):
clean_slab, gpaw_calc=restart(interm_gpw)
else:
clean_slab=read(target_dir+'/clean_slab/input.traj')
opt_slab=clean_slab_calculator(clean_slab,fix_layer,gpaw_calc,target_dir,solver_fmax,solver_maxstep)
else:
parprint('\tslab size is 1x1. Clean slab calculation is skipped.', file=f)
opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(simple_name=element+'_'+miller_index)
parprint(' ',file=f)
f.close()
return opt_slab.get_potential_energy(), opt_slab.get_magnetic_moments()
def clean_slab_calculator(clean_slab,
fix_layer,
gpaw_calc,
target_dir,
solver_fmax,
solver_maxstep,
fix_option='bottom'):
pbc_checker(clean_slab)
calc_dict=gpaw_calc.__dict__['parameters']
if calc_dict['spinpol']:
clean_slab.set_initial_magnetic_moments([0]*len(clean_slab))
slab_c_coord,cluster=detect_cluster(clean_slab)
if fix_option == 'bottom':
unique_cluster_index=sorted(set(cluster), key=cluster.index)[fix_layer-1]
max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
fix_mask=clean_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
fixed_atom_constrain=FixAtoms(mask=fix_mask)
clean_slab.set_constraint(fixed_atom_constrain)
clean_slab.set_calculator(gpaw_calc)
opt.relax(clean_slab,target_dir+'/clean_slab',fmax=solver_fmax,maxstep=solver_maxstep)
return clean_slab
def adsorption_energy_calculator(traj_file,
report_location,
opt_slab_energy,
adatom_pot_energy,
opt_slab_magmom,
gpaw_calc,
solver_fmax,
solver_maxstep,
calc_type,
fix_layer,
fix_option = 'bottom'):
interm_gpw='/'.join(traj_file.split('/')[:-1]+['slab_interm.gpw'])
if os.path.isfile(interm_gpw):
ads_slab, gpaw_calc=restart(interm_gpw)
else:
ads_slab=read(traj_file)
pbc_checker(ads_slab)
calc_dict=gpaw_calc.__dict__['parameters']
if calc_dict['spinpol']:
ads_slab=apply_magmom(opt_slab_magmom,ads_slab)
fixed_line_constrain=FixedLine(a=-1,direction=[0,0,1])
slab_c_coord,cluster=detect_cluster(ads_slab)
if fix_option == 'bottom':
unique_cluster_index=sorted(set(cluster), key=cluster.index)[fix_layer-1]
max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
if calc_type == 'grid':
fixed_atom_constrain=FixAtoms(mask=fix_mask)
ads_slab.set_constraint([fixed_atom_constrain,fixed_line_constrain])
elif calc_type == 'normal' and fix_option == 'bottom':
fixed_atom_constrain=FixAtoms(mask=fix_mask)
ads_slab.set_constraint(fixed_atom_constrain)
ads_slab.set_calculator(gpaw_calc)
location='/'.join(traj_file.split('/')[:-1])
f=paropen(report_location,'a')
parprint('Calculating '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
f.close()
opt.relax(ads_slab,location,fmax=solver_fmax,maxstep=solver_maxstep)
init_ads_site=traj_file.split('/')[-2]
E_slab_ads=ads_slab.get_potential_energy()
adsorption_energy=E_slab_ads-(opt_slab_energy+adatom_pot_energy)
final_ads_site=list(np.round(ads_slab.get_positions()[-1][:2],decimals=3))
final_ads_site_str='_'.join([str(i) for i in final_ads_site])
return init_ads_site, adsorption_energy, final_ads_site_str
def skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy):
f = paropen(report_location,'a')
parprint('Restarting...',file=f)
for gpw_file in all_gpw_files:
location='/'.join(gpw_file.split('/')[:-1])
parprint('Skipping '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
atoms=restart(gpw_file)[0]
init_adsorbates_site_lst.append(gpw_file.split('/')[-2])
E_slab_ads=atoms.get_potential_energy()
adsorption_energy=E_slab_ads-(opt_slab_energy+adatom_pot_energy)
adsorption_energy_lst.append(adsorption_energy)
final_ads_site=list(np.round(atoms.get_positions()[-1][:2],decimals=3))
final_ads_site_str='_'.join([str(i) for i in final_ads_site])
final_adsorbates_site_lst.append(final_ads_site_str)
parprint(' ',file=f)
f.close()
return init_adsorbates_site_lst,adsorption_energy_lst,final_adsorbates_site_lst
def initialize_report(report_location,gpaw_calc):
calc_dict=gpaw_calc.__dict__['parameters']
if world.rank==0 and os.path.isfile(report_location):
os.remove(report_location)
f = paropen(report_location,'a')
parprint('Initial Parameters:', file=f)
parprint('\t'+'xc: '+calc_dict['xc'],file=f)
parprint('\t'+'h: '+str(calc_dict['h']),file=f)
parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
parprint('\t'+'sw: '+str(calc_dict['occupations']),file=f)
parprint('\t'+'spin polarized: '+str(calc_dict['spinpol']),file=f)
if calc_dict['spinpol']:
parprint('\t'+'magmom: initialize magnetic moment from slab calculation.',file=f)
parprint(' ',file=f)
f.close()
class ads_auto_select:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
adatom_pot_energy,
solver_fmax,
solver_max_step,
restart_calc,
size=(1,1), #xy size
fix_layer=2,
fix_option='bottom'):
#initialize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
report_location=target_dir+'_autocat_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
## TO-DO: need to figure out how to calculate adsorption energy for larger system
# self.gpaw_calc=gpaw_calc
# self.calc_dict=self.gpaw_calc.__dict__['parameters']
# self.ads=ads
# self.all_ads_file_loc=self.target_dir+'/'+'adsorbates/'+str(self.ads)+'/'
# self.adatom_pot_energy=adatom_pot_energy
##generate report
initialize_report(report_location, gpaw_calc)
##compute clean slab energy
opt_slab_energy, opt_slab_magmom=get_clean_slab(element, miller_index_tight,
report_location, target_dir,size_xy,
fix_layer,solver_fmax,solver_max_step,
gpaw_calc)
#opt_slab=self.get_clean_slab()
##start adsorption calculation
adsorption_energy_dict={}
init_adsorbates_site_lst=[]
final_adsorbates_site_lst=[]
adsorption_energy_lst=[]
all_bridge_traj_files=glob(all_ads_file_loc+'bridge/*/input.traj')
all_ontop_traj_files=glob(all_ads_file_loc+'ontop/*/input.traj')
all_hollow_traj_files=glob(all_ads_file_loc+'hollow/*/input.traj')
all_traj_files=all_bridge_traj_files+all_ontop_traj_files+all_hollow_traj_files
all_bridge_gpw_files=glob(all_ads_file_loc+'bridge/*/slab.gpw')
all_ontop_gpw_files=glob(all_ads_file_loc+'ontop/*/slab.gpw')
all_hollow_gpw_files=glob(all_ads_file_loc+'hollow/*/slab.gpw')
all_gpw_files=all_bridge_gpw_files+all_ontop_gpw_files+all_hollow_gpw_files
## restart
if restart_calc==True and len(all_gpw_files)>=1:
init_adsorbates_site_lst,adsorption_energy_lst,final_adsorbates_site_lst=skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy)
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
#init_adsobates_site, adsorption_energy, final_adsorbates_site=self.adsorption_energy_calculator(traj_file,opt_slab)
output_lst=adsorption_energy_calculator(traj_file,report_location,
opt_slab_energy,adatom_pot_energy,
opt_slab_magmom,gpaw_calc,
solver_fmax,solver_max_step,
calc_type='normal',
fix_layer=fix_layer,fix_option = fix_option,
)
init_adsorbates_site_lst.append(output_lst[0])
adsorption_energy_lst.append(output_lst[1])
final_adsorbates_site_lst.append(output_lst[2])
adsorption_energy_dict['init_sites[x_y](Ang)']=init_adsorbates_site_lst
adsorption_energy_dict['final_sites[x_y](Ang)']=final_adsorbates_site_lst
adsorption_energy_dict['adsorption_energy(eV)']=adsorption_energy_lst
ads_df=pd.DataFrame(adsorption_energy_dict)
# ads_df.set_index('init_adsorbates_sites[x_y](Ang)',inplace=True)
ads_df.sort_values(by=['adsorption_energy(eV)'],inplace=True)
pd.set_option("display.max_rows", None, "display.max_columns", None)
f=paropen(report_location,'a')
parprint(ads_df,file=f)
parprint('',file=f)
f.close()
min_adsorbates_site=ads_df.iloc[[0]]['init_sites[x_y](Ang)'].to_list()[0]
lowest_ads_energy_slab=read(glob(all_ads_file_loc+'*/'+min_adsorbates_site+'/slab.traj')[0])
#finalize
final_slab_simple_name=element+'_'+miller_index_tight
ads_db=connect('final_database/ads_'+size_xy+'.db')
id=ads_db.reserve(name=final_slab_simple_name)
if id is None:
id=ads_db.get(name=final_slab_simple_name).id
ads_db.update(id=id,atoms=lowest_ads_energy_slab,name=final_slab_simple_name,
ads_pot_e=float(ads_df.iloc[[0]]['adsorption_energy(eV)'].to_list()[0]))
else:
ads_db.write(lowest_ads_energy_slab,
id=id,
name=final_slab_simple_name,
ads_pot_e=float(ads_df.iloc[[0]]['adsorption_energy(eV)'].to_list()[0]))
f=paropen(report_location,'a')
parprint('Adsorption energy calculation complete.',file=f)
parprint('Selected ads site is: ',file=f)
parprint(min_adsorbates_site,file=f)
f.close()
# def get_clean_slab(self):
# f = paropen(self.report_location,'a')
# parprint('Start clean slab calculation: ', file=f)
# if self.size != '1x1':
# clean_slab_gpw_path=self.target_dir+'/clean_slab/slab.gpw'
# clean_slab=read(self.target_dir+'/clean_slab/input.traj')
# if os.path.isfile(clean_slab_gpw_path):
# opt_slab, pre_calc = restart(clean_slab_gpw_path)
# pre_kpts=pre_calc.__dict__['parameters']['kpts']
# set_kpts=self.calc_dict['kpts']
# if pre_kpts == set_kpts:
# parprint('\t'+self.size+' clean slab is pre-calculated with kpts matched.',file=f)
# else:
# parprint('\t'+self.size+' clean slab pre-calculated has different kpts. Clean slab needs to re-calculate.', file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('\t'+self.size+' clean slab is not pre-calculated.',file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('slab size is 1x1. Clean slab calculation is skipped.', file=f)
# opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(simple_name=self.element+'_'+self.miller_index_tight)
# f.close()
# return opt_slab
# def clean_slab_calculator(self,clean_slab):
# pbc_checker(clean_slab)
# if self.calc_dict['spinpol']:
# clean_slab.set_initial_magnetic_moments([0]*len(clean_slab))
# slab_c_coord,cluster=detect_cluster(clean_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=clean_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# clean_slab.set_constraint(fixed_atom_constrain)
# clean_slab.set_calculator(self.gpaw_calc)
# opt.relax(clean_slab,self.target_dir+'/clean_slab',fmax=self.solver_fmax,maxstep=self.solver_max_step)
# return clean_slab
# def adsorption_energy_calculator(self,traj_file,opt_slab):
# ads_slab=read(traj_file)
# pbc_checker(ads_slab)
# if self.calc_dict['spinpol']:
# ads_slab=apply_magmom(opt_slab,ads_slab)
# slab_c_coord,cluster=detect_cluster(ads_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# ads_slab.set_constraint(fixed_atom_constrain)
# ads_slab.set_calculator(self.gpaw_calc)
# location='/'.join(traj_file.split('/')[:-1])
# f=paropen(self.report_location,'a')
# parprint('Calculating '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
# f.close()
# opt.relax(ads_slab,location,fmax=self.solver_fmax,maxstep=self.solver_max_step)
# init_ads_site=traj_file.split('/')[-2]
# E_slab_ads=ads_slab.get_potential_energy()
# opt_slab_energy=opt_slab.get_potential_energy()*int(self.size[0])*int(self.size[2])
# adsorption_energy=E_slab_ads-(opt_slab_energy+self.adatom_pot_energy)
# final_ads_site=list(np.round(ads_slab.get_positions()[-1][:2],decimals=3))
# final_ads_site_str='_'.join([str(i) for i in final_ads_site])
# return init_ads_site, adsorption_energy, final_ads_site_str
# def apply_magmom(self,opt_slab,ads_slab):
# slab_formula=ads_slab.get_chemical_symbols()
# magmom=opt_slab.get_magnetic_moments()
# magmom_ls=np.append(magmom,np.mean(magmom))
# magmom_ls[slab_formula.index(self.ads)]=0
# ads_slab.set_initial_magnetic_moments(magmom_ls)
# def initialize_report(self,report_location,gpaw_calc):
# calc_dict=gpaw_calc.__dict__['parameters']
# if world.rank==0 and os.path.isfile(report_location):
# os.remove(report_location)
# f = paropen(report_location,'a')
# parprint('Initial Parameters:', file=f)
# parprint('\t'+'xc: '+calc_dict['xc'],file=f)
# parprint('\t'+'h: '+str(calc_dict['h']),file=f)
# parprint('\t'+'kpts: '+str(calc_dict['kpts']),file=f)
# parprint('\t'+'sw: '+str(calc_dict['occupations']),file=f)
# parprint('\t'+'spin polarized: '+str(calc_dict['spinpol']),file=f)
# if calc_dict['spinpol']:
# parprint('\t'+'magmom: initialize magnetic moment from slab calculation.',file=f)
# parprint(' ',file=f)
# f.close()
class ads_grid_calc:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
adatom_pot_energy,
solver_fmax,
solver_max_step,
restart_calc,
size,
fix_layer=2,
fix_option='bottom'):
#initialize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
report_location=target_dir+'_grid_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
## TO-DO: need to figure out how to calculate adsorption energy for larger system
# self.gpaw_calc=gpaw_calc
# self.calc_dict=self.gpaw_calc.__dict__['parameters']
# self.ads=ads
#self.all_ads_file_loc=self.target_dir+'/'+'adsorbates/'+str(self.ads)+'/'
#self.adatom_pot_energy=adatom_pot_energy
##generate report
initialize_report(report_location,gpaw_calc)
##compute clean slab energy
opt_slab_energy, opt_slab_magmom=get_clean_slab(element, miller_index_tight,
report_location, target_dir, size_xy,
fix_layer,solver_fmax,solver_max_step,
gpaw_calc)
##start adsorption calculation
adsorption_energy_dict={}
init_adsorbates_site_lst=[]
adsorption_energy_lst=[]
final_adsorbates_site_lst=[]
all_traj_files=glob(all_ads_file_loc+'grid/*/input.traj')
all_gpw_files=glob(all_ads_file_loc+'grid/*/slab.gpw')
## restart
if restart_calc==True and len(all_gpw_files)>=1:
init_adsorbates_site_lst,adsorption_energy_lst=skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy)[0:2]
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
output_lst=adsorption_energy_calculator(traj_file,report_location,
opt_slab_energy,adatom_pot_energy,
opt_slab_magmom,gpaw_calc,
solver_fmax,solver_max_step,
calc_type='grid',
fix_layer=fix_layer,fix_option = 'bottom',
)
init_adsorbates_site_lst.append(output_lst[0])
adsorption_energy_lst.append(output_lst[1])
adsorption_energy_dict['init_sites[x_y](Ang)']=init_adsorbates_site_lst
adsorption_energy_dict['adsorption_energy(eV)']=adsorption_energy_lst
ads_df=pd.DataFrame(adsorption_energy_dict)
#ads_df.set_index('init_adsorbates_sites[x_y](Ang)',inplace=True)
ads_df.sort_values(by=['adsorption_energy(eV)'],inplace=True)
ads_df.to_csv(target_dir+'_ads_grid.csv')
pd.set_option("display.max_rows", None, "display.max_columns", None)
f=paropen(report_location,'a')
parprint(ads_df,file=f)
parprint('',file=f)
parprint('Grid adsorption energy calculation complete.',file=f)
f.close()
# def get_clean_slab(self):
# f = paropen(self.report_location,'a')
# parprint('Start clean slab calculation: ', file=f)
# if self.size != '1x1':
# clean_slab_gpw_path=self.target_dir+'/clean_slab/slab.gpw'
# clean_slab=read(self.target_dir+'/clean_slab/input.traj')
# if os.path.isfile(clean_slab_gpw_path):
# opt_slab, pre_calc = restart(clean_slab_gpw_path)
# pre_kpts=pre_calc.__dict__['parameters']['kpts']
# set_kpts=self.calc_dict['kpts']
# if pre_kpts == set_kpts:
# parprint('\t'+self.size+' clean slab is pre-calculated with kpts matched.',file=f)
# else:
# parprint('\t'+self.size+' clean slab pre-calculated has different kpts. Clean slab needs to re-calculate.', file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('\t'+self.size+' clean slab is not pre-calculated.',file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('slab size is 1x1. Clean slab calculation is skipped.', file=f)
# opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(simple_name=self.element+'_'+self.miller_index_tight)
# f.close()
# return opt_slab
# def clean_slab_calculator(self,clean_slab):
# pbc_checker(clean_slab)
# if self.calc_dict['spinpol']:
# clean_slab.set_initial_magnetic_moments([0]*len(clean_slab))
# slab_c_coord,cluster=detect_cluster(clean_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=clean_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# clean_slab.set_constraint(fixed_atom_constrain)
# clean_slab.set_calculator(self.gpaw_calc)
# opt.relax(clean_slab,self.target_dir+'/clean_slab',fmax=self.solver_fmax,maxstep=self.solver_max_step)
# return clean_slab
# def adsorption_energy_calculator(self,traj_file,opt_slab):
# ads_slab=read(traj_file)
# pbc_checker(ads_slab)
# if self.calc_dict['spinpol']:
# ads_slab=apply_magmom(opt_slab,ads_slab)
# fixed_line_constrain=FixedLine(a=-1,direction=[0,0,1])
# slab_c_coord,cluster=detect_cluster(ads_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# ads_slab.set_constraint([fixed_atom_constrain,fixed_line_constrain])
# ads_slab.set_calculator(self.gpaw_calc)
# location='/'.join(traj_file.split('/')[:-1])
# f=paropen(self.report_location,'a')
# parprint('Calculating '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
# f.close()
# opt.relax(ads_slab,location,fmax=self.solver_fmax,maxstep=self.solver_max_step)
# init_ads_site=traj_file.split('/')[-2]
# adsorption_energy=ads_slab.get_potential_energy()-(opt_slab.get_potential_energy()+self.adatom_pot_energy)
# return init_ads_site, adsorption_energy
# def apply_magmom(self,opt_slab,ads_slab):
# slab_formula=ads_slab.get_chemical_symbols()
# magmom=opt_slab.get_magnetic_moments()
# magmom_ls=np.append(magmom,np.mean(magmom))
# magmom_ls[slab_formula.index(self.ads)]=0
# ads_slab.set_initial_magnetic_moments(magmom_ls)
# def initialize_report(self):
# if world.rank==0 and os.path.isfile(self.report_location):
# os.remove(self.report_location)
# f = paropen(self.report_location,'a')
# parprint('Initial Parameters:', file=f)
# parprint('\t'+'xc: '+self.calc_dict['xc'],file=f)
# parprint('\t'+'h: '+str(self.calc_dict['h']),file=f)
# parprint('\t'+'kpts: '+str(self.calc_dict['kpts']),file=f)
# parprint('\t'+'sw: '+str(self.calc_dict['occupations']),file=f)
# parprint('\t'+'spin polarized: '+str(self.calc_dict['spinpol']),file=f)
# if self.calc_dict['spinpol']:
# parprint('\t'+'magmom: initial magnetic moment from slab calculation.',file=f)
# parprint(' ',file=f)
# f.close()
class ads_lowest_ads_site_calc:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
adatom_pot_energy,
solver_fmax,
solver_max_step,
restart_calc,
size, #xy size
fix_layer=2,
fix_option='bottom'):
#initialize
##globalize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
report_location=target_dir+'_lowest_ads_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
##generate report
initialize_report(report_location, gpaw_calc)
##compute clean slab energy
opt_slab_energy, opt_slab_magmom=get_clean_slab(element, miller_index_tight,
report_location, target_dir, size_xy,
fix_layer,solver_fmax,solver_max_step,
gpaw_calc)
##start adsorption calculation
adsorption_energy_dict={}
init_adsorbates_site_lst=[]
final_adsorbates_site_lst=[]
adsorption_energy_lst=[]
all_traj_files=glob(all_ads_file_loc+'lowest_ads_site/*/input.traj')
all_gpw_files=glob(all_ads_file_loc+'lowest_ads_site/*/slab.gpw')
if restart_calc==True and len(all_gpw_files)>=1:
init_adsorbates_site_lst,adsorption_energy_lst=skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy)[0:2]
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
output_lst=adsorption_energy_calculator(traj_file,report_location,
opt_slab_energy,adatom_pot_energy,
opt_slab_magmom,gpaw_calc,
solver_fmax,solver_max_step,
calc_type='normal',
fix_layer=fix_layer,fix_option = 'bottom',
)
init_adsorbates_site_lst.append(output_lst[0])
adsorption_energy_lst.append(output_lst[1])
final_adsorbates_site_lst.append(output_lst[2])
adsorption_energy_dict['init_sites[x_y](Ang)']=init_adsorbates_site_lst
adsorption_energy_dict['final_sites[x_y](Ang)']=final_adsorbates_site_lst
adsorption_energy_dict['adsorption_energy(eV)']=adsorption_energy_lst
ads_df=pd.DataFrame(adsorption_energy_dict)
# ads_df.set_index('init_adsorbates_sites[x_y](Ang)',inplace=True)
ads_df.sort_values(by=['adsorption_energy(eV)'],inplace=True)
pd.set_option("display.max_rows", None, "display.max_columns", None)
f=paropen(report_location,'a')
parprint(ads_df,file=f)
parprint('',file=f)
f.close()
min_adsorbates_site=ads_df.iloc[[0]]['init_sites[x_y](Ang)'].to_list()[0]
lowest_ads_energy_slab=read(glob(all_ads_file_loc+'*/'+min_adsorbates_site+'/slab.traj')[0])
#finalize
final_slab_simple_name=element+'_'+miller_index_tight
ads_db=connect('final_database/ads_'+size_xy+'.db')
id=ads_db.reserve(name=final_slab_simple_name)
if id is None:
id=ads_db.get(name=final_slab_simple_name).id
ads_db.update(id=id,atoms=lowest_ads_energy_slab,name=final_slab_simple_name,
ads_pot_e=float(ads_df.iloc[[0]]['adsorption_energy(eV)'].to_list()[0]))
else:
ads_db.write(lowest_ads_energy_slab,
id=id,
name=final_slab_simple_name,
ads_pot_e=float(ads_df.iloc[[0]]['adsorption_energy(eV)'].to_list()[0]))
f=paropen(report_location,'a')
parprint('Adsorption energy calculation complete.',file=f)
parprint('Selected ads site is: ',file=f)
parprint(min_adsorbates_site,file=f)
f.close()
# def get_clean_slab(self):
# f = paropen(self.report_location,'a')
# parprint('Start clean slab calculation: ', file=f)
# if self.size != '1x1':
# clean_slab_gpw_path=self.target_dir+'/clean_slab/slab.gpw'
# clean_slab=read(self.target_dir+'/clean_slab/input.traj')
# if os.path.isfile(clean_slab_gpw_path):
# opt_slab, pre_calc = restart(clean_slab_gpw_path)
# pre_kpts=pre_calc.__dict__['parameters']['kpts']
# set_kpts=self.calc_dict['kpts']
# if pre_kpts == set_kpts:
# parprint('\t'+self.size+' clean slab is pre-calculated with kpts matched.',file=f)
# else:
# parprint('\t'+self.size+' clean slab pre-calculated has different kpts. Clean slab needs to re-calculate.', file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('\t'+self.size+' clean slab is not pre-calculated.',file=f)
# parprint('\t'+'Calculating '+self.size+' clean slab...',file=f)
# opt_slab=self.clean_slab_calculator(clean_slab)
# else:
# parprint('slab size is 1x1. Clean slab calculation is skipped.', file=f)
# opt_slab=connect('final_database'+'/'+'surf.db').get_atoms(simple_name=self.element+'_'+self.miller_index_tight)
# parprint(' ',file=f)
# f.close()
# return opt_slab
# def clean_slab_calculator(self,clean_slab):
# pbc_checker(clean_slab)
# if self.calc_dict['spinpol']:
# clean_slab.set_initial_magnetic_moments([0]*len(clean_slab))
# slab_c_coord,cluster=detect_cluster(clean_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=clean_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# clean_slab.set_constraint(fixed_atom_constrain)
# clean_slab.set_calculator(self.gpaw_calc)
# opt.relax(clean_slab,self.target_dir+'/clean_slab',fmax=self.solver_fmax,maxstep=self.solver_max_step)
# return clean_slab
# def adsorption_energy_calculator(self,traj_file,opt_slab):
# ads_slab=read(traj_file)
# pbc_checker(ads_slab)
# if self.calc_dict['spinpol']:
# ads_slab=apply_magmom(opt_slab,ads_slab)
# slab_c_coord,cluster=detect_cluster(ads_slab)
# if self.fix_option == 'bottom':
# unique_cluster_index=sorted(set(cluster), key=cluster.index)[self.fix_layer-1]
# max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
# fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
# else:
# raise RuntimeError('Only bottom fix option available now.')
# fixed_atom_constrain=FixAtoms(mask=fix_mask)
# ads_slab.set_constraint(fixed_atom_constrain)
# ads_slab.set_calculator(self.gpaw_calc)
# location='/'.join(traj_file.split('/')[:-1])
# f=paropen(self.report_location,'a')
# parprint('\tCalculating '+('/'.join(location.split('/')[-2:]))+' adsorption site...',file=f)
# f.close()
# opt.relax(ads_slab,location,fmax=self.solver_fmax,maxstep=self.solver_max_step)
# init_ads_site=traj_file.split('/')[-2]
# E_slab_ads=ads_slab.get_potential_energy()
# opt_slab_energy=opt_slab.get_potential_energy()
# adsorption_energy=E_slab_ads-(opt_slab_energy+self.adatom_pot_energy)
# final_ads_site=list(np.round(ads_slab.get_positions()[-1][:2],decimals=3))
# final_ads_site_str='_'.join([str(i) for i in final_ads_site])
# return init_ads_site, adsorption_energy, final_ads_site_str
# def initialize_report(self):
# if world.rank==0 and os.path.isfile(self.report_location):
# os.remove(self.report_location)
# f = paropen(self.report_location,'a')
# parprint('Initial Parameters:', file=f)
# parprint('\t'+'xc: '+self.calc_dict['xc'],file=f)
# parprint('\t'+'h: '+str(self.calc_dict['h']),file=f)
# parprint('\t'+'kpts: '+str(self.calc_dict['kpts']),file=f)
# parprint('\t'+'sw: '+str(self.calc_dict['occupations']),file=f)
# parprint('\t'+'spin polarized: '+str(self.calc_dict['spinpol']),file=f)
# if self.calc_dict['spinpol']:
# parprint('\t'+'magmom: initial magnetic moment from slab calculation.',file=f)
# parprint(' ',file=f)
# f.close()
class ads_NN_interact_calc:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
solver_fmax,
solver_max_step,
restart_calc,
size, #xy size
sub_dir,
fix_layer=2,
fix_option='bottom'):
#initialize
##globalize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
#report_location=target_dir+'_lowest_ads_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
##start adsorption calculation
# adsorption_energy_dict={}
# init_adsorbates_site_lst=[]
# final_adsorbates_site_lst=[]
# adsorption_energy_lst=[]
all_traj_files=glob(all_ads_file_loc+sub_dir+'/*/input.traj')
all_gpw_files=glob(all_ads_file_loc+sub_dir+'/*/slab.gpw')
if restart_calc==True and len(all_gpw_files)>=1:
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
interm_gpw='/'.join(traj_file.split('/')[:-1]+['slab_interm.gpw'])
if os.path.isfile(interm_gpw):
ads_slab, gpaw_calc=restart(interm_gpw)
else:
ads_slab=read(traj_file)
pbc_checker(ads_slab)
calc_dict=gpaw_calc.__dict__['parameters']
if calc_dict['spinpol']:
raise RuntimeError('spin polarization calculation not supported.')
slab_c_coord,cluster=detect_cluster(ads_slab)
if fix_option == 'bottom':
unique_cluster_index=sorted(set(cluster), key=cluster.index)[fix_layer-1]
max_height_fix=max(slab_c_coord[cluster==unique_cluster_index])
fix_mask=ads_slab.positions[:,2]<(max_height_fix+0.05) #add 0.05 Ang to make sure all bottom fixed
else:
raise RuntimeError('Only bottom fix option available now.')
fixed_atom_constrain=FixAtoms(mask=fix_mask)
ads_slab.set_constraint(fixed_atom_constrain)
ads_slab.set_calculator(gpaw_calc)
location='/'.join(traj_file.split('/')[:-1])
opt.relax(ads_slab,location,fmax=solver_fmax,maxstep=solver_max_step)
class ads_custom_ads_site_calc:
def __init__(self,
element,
miller_index_tight,
gpaw_calc,
ads,
adatom_pot_energy,
solver_fmax,
solver_max_step,
restart_calc,
size, #xy size
fix_layer=2,
fix_option='bottom'):
#initialize
##globalize variables
size_xy=str(size[0])+'x'+str(size[1])
target_dir='results/'+element+'/'+'ads/'+size_xy+'/'+miller_index_tight
report_location=target_dir+'_custom_ads_results_report.txt'
all_ads_file_loc=target_dir+'/'+'adsorbates/'+str(ads)+'/'
##generate report
initialize_report(report_location, gpaw_calc)
##compute clean slab energy
opt_slab_energy, opt_slab_magmom=get_clean_slab(element, miller_index_tight,
report_location, target_dir, size_xy,
fix_layer,solver_fmax,solver_max_step,
gpaw_calc)
##start adsorption calculation
adsorption_energy_dict={}
init_adsorbates_site_lst=[]
final_adsorbates_site_lst=[]
adsorption_energy_lst=[]
all_traj_files=glob(all_ads_file_loc+'custom/*/input.traj')
all_gpw_files=glob(all_ads_file_loc+'custom/*/slab.gpw')
if restart_calc==True and len(all_gpw_files)>=1:
init_adsorbates_site_lst,adsorption_energy_lst=skip_ads_calculated(report_location,
all_gpw_files,
init_adsorbates_site_lst,
adsorption_energy_lst,
final_adsorbates_site_lst,
opt_slab_energy,
adatom_pot_energy)[0:2]
all_gpw_files_ads_site=['/'.join(i.split('/')[:-1]) for i in all_gpw_files]
all_traj_files=[i for i in all_traj_files if '/'.join(i.split('/')[:-1]) not in all_gpw_files_ads_site]
for traj_file in all_traj_files:
output_lst=adsorption_energy_calculator(traj_file,report_location,
opt_slab_energy,adatom_pot_energy,
opt_slab_magmom,gpaw_calc,
solver_fmax,solver_max_step,
calc_type='normal',
fix_layer=fix_layer,fix_option = 'bottom',
)
init_adsorbates_site_lst.append(output_lst[0])
adsorption_energy_lst.append(output_lst[1])
final_adsorbates_site_lst.append(output_lst[2])
adsorption_energy_dict['init_sites[x_y](Ang)']=init_adsorbates_site_lst
adsorption_energy_dict['final_sites[x_y](Ang)']=final_adsorbates_site_lst
adsorption_energy_dict['adsorption_energy(eV)']=adsorption_energy_lst
ads_df= | pd.DataFrame(adsorption_energy_dict) | pandas.DataFrame |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from datetime import datetime
from typing import Any, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_consts import (
AnomalyResponse,
ChangePointInterval,
ConfidenceBand,
PercentageChange,
)
from kats.utils.decomposition import TimeSeriesDecomposition
"""Statistical Significance Detector Module
This module contains simple detectors that apply a t-test over a rolling window to
check whether there is a statistically significant increase or decrease between the control and test
time periods. In addition to the univariate version of this test, this module includes a
multivariate version that uses a false discovery rate (FDR) controlling procedure to reduce noise.
"""
class StatSigDetectorModel(DetectorModel):
"""
StatSigDetectorModel is a simple detector, which compares a control and test period.
The detector assumes that the time series data comes from an iid normal distribution,
and applies a t-test to check whether the means of the control and test periods are
significantly different.
We start with the history data; then, for the current data, we apply a rolling
window, adding one data point at a time from the current data and detecting significant
change. We return the t-statistic as a score, which reflects the severity of the
change.
We suggest using n_control >= 30 to get good estimates
Attributes:
n_control: number of data points(or time units) of history to compare with
n_test: number of points(or time_units) to compare the history with
serialized_model: serialized json containing the parameters
time_units: units of time used to measure the intervals. If not provided
we infer it from the provided data.
rem_season: bool, default value is False; whether to remove seasonality from the historical data and the current data
seasonal_period: str, default value is 'weekly'. Other possible values: 'daily', 'biweekly', 'monthly', 'yearly'
use_corrected_scores: bool, default value is True; whether to use corrected t-scores instead of the original t-scores.
max_split_ts_length: int, default value is 500. If the given TS (except the historical part) is longer than max_split_ts_length,
we transform the long univariate TS into a multivariate TS and then use the multivariate stat sig detector, which is faster.
>>> # Example usage:
>>> # history and ts_pt are TimeSeriesData objects and history is larger
>>> # than (n_control + n_test) so that we have sufficient history to
>>> # run the detector
>>> n_control = 28
>>> n_test = 7
>>> import random
>>> control_time = pd.date_range(start='2018-01-01', freq='D', periods=(n_control + n_test))
>>> test_time = pd.date_range(start='2018-02-05', freq='D', periods=n_test)
>>> control_val = [random.normalvariate(100,10) for _ in range(n_control + n_test)]
>>> test_val = [random.normalvariate(120,10) for _ in range(n_test)]
>>> hist_ts = TimeSeriesData(time=control_time, value=pd.Series(control_val))
>>> data_ts = TimeSeriesData(time=test_time, value=pd.Series(test_val))
>>> ss_detect = StatSigDetectorModel(n_control=n_control, n_test=n_test)
>>> anom = ss_detect.fit_predict(data=data_ts, historical_data=hist_ts)
"""
data: Optional[TimeSeriesData] = None
def __init__(
self,
n_control: Optional[int] = None,
n_test: Optional[int] = None,
serialized_model: Optional[bytes] = None,
time_unit: Optional[str] = None,
rem_season: bool = False,
seasonal_period: str = "weekly",
use_corrected_scores: bool = True,
max_split_ts_length: int = 500,
) -> None:
if serialized_model:
model_dict = json.loads(serialized_model)
self.n_test: int = model_dict["n_test"]
self.n_control: int = model_dict["n_control"]
self.time_unit: str = model_dict["time_unit"]
# for seasonality
self.rem_season: bool = model_dict.get("rem_season", rem_season)
self.seasonal_period: str = model_dict.get(
"seasonal_period", seasonal_period
)
# for big data and correct t-scores
self.use_corrected_scores: bool = model_dict.get(
"use_corrected_scores", use_corrected_scores
)
# threshold for splitting long TS
self.max_split_ts_length: int = model_dict.get(
"max_split_ts_length", max_split_ts_length
)
else:
self.n_test: Optional[int] = n_test
self.n_control: Optional[int] = n_control
self.time_unit: Optional[str] = time_unit
# for seasonality
self.rem_season: bool = rem_season
self.seasonal_period: str = seasonal_period
# big data and t-scores
self.use_corrected_scores: bool = use_corrected_scores
# threshold for splitting long TS
self.max_split_ts_length: int = max_split_ts_length
if (self.n_control is None) or (self.n_test is None):
raise ValueError(
"You must either provide serialized model or values for control "
"and test intervals."
)
self.control_interval: Optional[ChangePointInterval] = None
self.test_interval: Optional[ChangePointInterval] = None
self.response: Optional[AnomalyResponse] = None
self.is_initialized = False # flag on whether initialized or not
self.last_N = 0 # this is the size of the last chunk of data we saw
self.data_history: Optional[TimeSeriesData] = None
# for seasonality
self.data_season: Optional[TimeSeriesData] = None
# big data strategy
self.bigdata_trans_flag: Optional[bool] = None
self.remaining: Optional[int] = None
def serialize(self) -> bytes:
"""
Serializes by putting model parameters in a json
"""
model_dict = {
"n_control": self.n_control,
"n_test": self.n_test,
"time_unit": self.time_unit,
"rem_season": self.rem_season,
"seasonal_period": self.seasonal_period,
"use_corrected_scores": self.use_corrected_scores,
"max_split_ts_length": self.max_split_ts_length,
}
return json.dumps(model_dict).encode("utf-8")
def fit_predict(
self,
data: TimeSeriesData,
historical_data: Optional[TimeSeriesData] = None,
**kwargs: Any,
) -> AnomalyResponse:
"""
This is the main working function.
The function returns an AnomalyResponse object of length
equal to the length of the data.
We require len(historical_data) > (n_control + n_test).
Args:
data: TimeSeriesData, A univariate TimeSeriesData for which we are running the StatSigDetectorModel
historical_data: Optional[TimeSeriesData] Historical data used to do detection for initial points in data
"""
if not data.is_univariate():
msg = "Input is multivariate but StatSigDetector expected univariate input."
logging.error(msg)
raise ValueError(msg)
self._set_time_unit(data=data, historical_data=historical_data)
self.last_N = len(data)
# this ensures we start with a default response of
# the size of the data
self._init_response(data)
response = self.response
assert response is not None
# when there is no need to update
# just return the initial response of zeros
if not self._should_update(data=data, historical_data=historical_data):
return response
# handle cases where there is either no historical data, or
# not enough historical data
data, historical_data = self._handle_not_enough_history(
data=data,
historical_data=historical_data,
)
# remove seasonality
if self.rem_season:
sh_data = SeasonalityHandler(
data=data, seasonal_period=self.seasonal_period
)
self.data_season = sh_data.get_seasonality()
data = sh_data.remove_seasonality()
if historical_data:
sh_hist_data = SeasonalityHandler(
data=historical_data,
seasonal_period=self.seasonal_period,
)
historical_data = sh_hist_data.remove_seasonality()
self.data = data
# first initialize this with the historical data
self._init_data(historical_data)
# if using new t-scores
if self.use_corrected_scores:
if (
len(data) > self.max_split_ts_length
# pyre-ignore[16]: `Optional` has no attribute `time`.
and pd.infer_freq(historical_data.time) == pd.infer_freq(data.time)
):
self.bigdata_trans_flag = True
else:
self.bigdata_trans_flag = False
else:
self.bigdata_trans_flag = False
# if need trans to multi-TS
if self.bigdata_trans_flag:
new_data_ts = self._reorganize_big_data(self.max_split_ts_length)
ss_detect = MultiStatSigDetectorModel(
n_control=self.n_control,
n_test=self.n_test,
time_unit=self.time_unit,
rem_season=False,
seasonal_period=self.seasonal_period,
skip_rescaling=True,
use_corrected_scores=self.use_corrected_scores,
)
anom = ss_detect.fit_predict(data=new_data_ts)
self._reorganize_back(anom)
else:
self._init_control_test(
data if historical_data is None else historical_data
)
# set the flag to true
self.is_initialized = True
# now run through the data to get the prediction
for i in range(len(data)):
current_time = data.time.iloc[i]
ts_pt = TimeSeriesData(
time=pd.Series(current_time, copy=False),
value=pd.Series(data.value.iloc[i], copy=False),
)
self._update_data(ts_pt)
self._update_control_test(ts_pt)
self._update_response(ts_pt.time.iloc[0])
# add seasonality back
if self.rem_season:
data_season = self.data_season
confidence_band = response.confidence_band
predicted_ts = response.predicted_ts
assert data_season is not None
assert confidence_band is not None
assert predicted_ts is not None
start_idx = len(response.scores) - len(data_season)
datatime = response.scores.time
self.response = AnomalyResponse(
scores=response.scores,
confidence_band=ConfidenceBand(
upper=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
confidence_band.upper.value.values[:start_idx],
copy=False,
),
pd.Series(
np.asarray(
confidence_band.upper.value.values[start_idx:]
)
+ np.asarray(data_season.value.values),
copy=False,
),
],
copy=False,
),
),
lower=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
confidence_band.lower.value.values[:start_idx],
copy=False,
),
pd.Series(
np.asarray(
confidence_band.lower.value.values[start_idx:]
)
+ np.asarray(data_season.value.values),
copy=False,
),
],
copy=False,
),
),
),
predicted_ts=TimeSeriesData(
time=datatime,
value=pd.concat(
[
pd.Series(
predicted_ts.value.values[:start_idx], copy=False
),
pd.Series(
np.asarray(predicted_ts.value.values[start_idx:])
+ np.asarray(data_season.value.values),
copy=False,
),
],
copy=False,
),
),
anomaly_magnitude_ts=response.anomaly_magnitude_ts,
stat_sig_ts=response.stat_sig_ts,
)
assert self.response is not None
return self.response.get_last_n(self.last_N)
def _reorganize_big_data(self, max_split_ts_length: int) -> TimeSeriesData:
data_history = self.data_history
data = self.data
assert data_history is not None
assert data is not None
first_half_len = len(data_history)
n_seq = len(data) // max_split_ts_length + int(
len(data) % max_split_ts_length > 0
)
remaining = (max_split_ts_length * n_seq - len(data)) % max_split_ts_length
time_need = pd.concat(
[data_history.time[:], data.time[:max_split_ts_length]],
copy=False,
)
new_ts = [
list(
pd.concat(
[data_history.value[:], data.value[:max_split_ts_length]],
copy=False,
)
)
]
for i in range(max_split_ts_length, len(data), max_split_ts_length):
new_ts.append(
new_ts[-1][-first_half_len:]
+ list(data.value[i : i + max_split_ts_length])
)
new_ts[-1] += [1] * remaining
new_data_ts = TimeSeriesData(
pd.DataFrame(
{
**{"time": time_need},
**{f"ts_{i}": new_ts[i] for i in range(len(new_ts))},
},
copy=False,
)
)
self.remaining = remaining
return new_data_ts
def _reorganize_back(self, anom: AnomalyResponse) -> None:
data_history = self.data_history
remaining = self.remaining
anom_predicted_ts = anom.predicted_ts
anom_confidence_band = anom.confidence_band
anom_stat_sig_ts = anom.stat_sig_ts
response = self.response
assert data_history is not None
assert remaining is not None
assert anom_predicted_ts is not None
assert anom_confidence_band is not None
assert anom_stat_sig_ts is not None
assert response is not None
response_predicted_ts = response.predicted_ts
assert response_predicted_ts is not None
start_point = len(data_history)
res_score_val = pd.Series(
pd.DataFrame(anom.scores.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
res_predicted_ts_val = pd.Series(
pd.DataFrame(anom_predicted_ts.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
res_anomaly_magnitude_ts_val = pd.Series(
pd.DataFrame(anom.anomaly_magnitude_ts.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
res_stat_sig_ts_val = pd.Series(
pd.DataFrame(anom_stat_sig_ts.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
res_confidence_band_lower_val = pd.Series(
pd.DataFrame(anom_confidence_band.lower.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
res_confidence_band_upper_val = pd.Series(
pd.DataFrame(anom_confidence_band.upper.value, copy=False)
.iloc[start_point:, :]
.values.T.flatten()[:-remaining],
copy=False,
)
datatime = response.scores.time
zeros = pd.Series(np.zeros(len(datatime) - len(res_score_val)), copy=False)
datavalues = pd.Series(
response_predicted_ts.value.values[: len(datatime) - len(res_score_val)],
copy=False,
)
self.response = AnomalyResponse(
scores=TimeSeriesData(
time=datatime, value=pd.concat([zeros, res_score_val], copy=False)
),
confidence_band=ConfidenceBand(
upper=TimeSeriesData(
time=datatime,
value=pd.concat(
[datavalues, res_confidence_band_upper_val], copy=False
),
),
lower=TimeSeriesData(
time=datatime,
value=pd.concat(
[datavalues, res_confidence_band_lower_val], copy=False
),
),
),
predicted_ts=TimeSeriesData(
time=datatime,
value=pd.concat([datavalues, res_predicted_ts_val], copy=False),
),
anomaly_magnitude_ts=TimeSeriesData(
time=datatime,
value= | pd.concat([zeros, res_anomaly_magnitude_ts_val], copy=False) | pandas.concat |
"""
--------------------------
OFFLINE OPTIMAL BENCHMARK:
---------------------------
It uses IBM CPLEX to maximise the social welfare of the current network structure and task list by solving the current environment under the usual problem restrictions.
This represents the upper bound of the social welfare.
In order to use the benchmark, call the offline_optimal() function with the required parameters.
"""
from numpy.core.fromnumeric import take
import pandas as pd
import numpy as np
from tqdm import tqdm
from docplex.mp.model import Model as Model
from docplex.mp.environment import Environment
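# Minimal usage sketch (hypothetical, not part of the original benchmark): build a
# tiny docplex model, solve it, and convert the solution to a DataFrame with the
# as_df() helper defined below. The variables, constraint, and objective here are
# made up purely for illustration; the real offline_optimal() model encodes the
# full network structure and task list.
def _as_df_usage_sketch():
    m = Model(name="toy_allocation")
    x = m.binary_var(name="assign_task_1")
    y = m.binary_var(name="assign_task_2")
    m.add_constraint(x + y <= 1)      # at most one of the two toy tasks is served
    m.maximize(3 * x + 2 * y)         # toy "social welfare" objective
    solution = m.solve()              # requires a local CPLEX installation
    return as_df(solution)            # -> DataFrame with 'name' and 'value' columns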
def as_df(cplex_solution, name_key='name', value_key='value'):
""" Converts the solution to a pandas dataframe with two columns: variable name and values
:param name_key: column name for variable names. Default is 'name'
:param value_key: column name for values. Default is 'value'.
:return: a pandas dataframe, if pandas is present.
"""
assert name_key
assert value_key
assert name_key != value_key
try:
import pandas as pd
except ImportError:
raise ImportError(
'Cannot convert solution to pandas.DataFrame if pandas is not available')
names = []
values = []
for dv, dvv in cplex_solution.iter_var_values():
names.append(dv.to_string())
values.append(dvv)
name_value_dict = {name_key: names, value_key: values}
return | pd.DataFrame(name_value_dict) | pandas.DataFrame |
"""
Topic: Claims Grouping Analysis Exercise
Author: <NAME>
Date Created: 05/10/2018
"""
import gc
import os
import sys
import pyodbc
import random
import textwrap
import warnings
import numpy as np
import pandas as pd
from time import time
from time import sleep
from copy import deepcopy
from datetime import timedelta
from contextlib import suppress
from subprocess import call
from subprocess import check_output
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
homeDir = r'C:\path\to\project\dir'
sys.path.append(homeDir)
import BIC
def checkForBIC():
modName = r'BIC'
if modName not in sys.modules:
try:
from ..HelperScripts import BIC
except:
from .HelperScripts import BIC
if modName not in sys.modules:
try:
from MyModules.Stats import BIC
except ImportError:
print('Warning! Module {} not imported!'.format(modName))
pd.options.mode.chained_assignment = None
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.float_format', lambda x: '%.4f' % x)
gc.enable()
mainDir = r'C:\path\to\project\dir' # Folder for main project
tempOutPath = r'C:\path\to\temporary\dir' # Folder for holding temporary files.
eLog = r'C:\path\to\project\dir\errors.txt'
mySchema = r'MyTestDB'
tblName = 'DataClusters'
query1 = textwrap.dedent("""
""")
def getSqlData(input_query=query1):
size = 0
chunksize = 100000
chunkList = []
print('Reading data from SQL Server...')
server = 'my_server'
database = 'MyTestDB'
    conn = pyodbc.connect(f'DRIVER={{SQL Server Native Client 11.0}};SERVER={server};DATABASE={database};MARS_Connection=Yes;Trusted_Connection=yes')
conn.autocommit = True
cursor = conn.cursor()
cursor.execute(input_query)
conn.commit()
query2 = "SELECT * FROM ##_temp_table"
print('Current progress:')
for chunk in pd.read_sql_query(query2, conn, chunksize=chunksize):
size += chunksize
print('\t~{:,} rows read'.format(size))
chunkList.append(chunk)
    df = pd.concat(chunkList, axis=0)
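    # Assumed completion (the original snippet is truncated here): hand the
    # concatenated result back to the caller.
    return df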
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
        # GH 30114, this is to test ignore_index on a MultiIndex
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
Timestamp("20130101"),
Timestamp("20130103"),
Timestamp("20130102"),
Timestamp("20130105"),
],
Timestamp("20130104"),
),
(["1one", "3one", "2one", "5one"], "4one"),
],
)
def test_sort_index_multilevel_repr_8017(self, gen, extra):
np.random.seed(0)
data = np.random.randn(3, 4)
columns = MultiIndex.from_tuples([("red", i) for i in gen])
df = DataFrame(data, index=list("def"), columns=columns)
df2 = pd.concat(
[
df,
DataFrame(
"world",
index=list("def"),
columns=MultiIndex.from_tuples([("red", extra)]),
),
],
axis=1,
)
# check that the repr is good
# make sure that we have a correct sparsified repr
        # e.g. only one 'red' header appears in the first line
assert str(df2).splitlines()[0].split() == ["red"]
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[("red", extra)] = "world"
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories",
[
pytest.param(["a", "b", "c"], id="str"),
pytest.param(
[pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)],
id="pd.Interval",
),
],
)
def test_sort_index_with_categories(self, categories):
# GH#23452
df = DataFrame(
{"foo": range(len(categories))},
index=CategoricalIndex(
data=categories, categories=categories, ordered=True
),
)
df.index = df.index.reorder_categories(df.index.categories[::-1])
result = df.sort_index()
expected = DataFrame(
{"foo": reversed(range(len(categories)))},
index=CategoricalIndex(
data=categories[::-1], categories=categories[::-1], ordered=True
),
)
tm.assert_frame_equal(result, expected)
class TestDataFrameSortIndexKey:
def test_sort_multi_index_key(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
).set_index(list("abc"))
result = df.sort_index(level=list("ac"), key=lambda x: x)
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
).set_index(list("abc"))
        tm.assert_frame_equal(result, expected)
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import pickle
from sklearn.decomposition import IncrementalPCA, MiniBatchDictionaryLearning
import gc
def load_subject(subject_filename):
with open(subject_filename, 'rb') as f:
subject_data = pickle.load(f)
return subject_data
class ImageLoader():
def __init__(self, transforms=None):
self.transforms = transforms
pass
def transform(self, X, y=None):
X = load_subject(X)
if self.transforms is not None:
X = self.transforms(image=X)['image']
return X
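# Usage sketch (path is illustrative, not defined in this file): load one pickled
# subject volume and, if configured, apply an albumentations-style
# `transforms(image=...)['image']` call before returning the array.
#     loader = ImageLoader()
#     volume = loader.transform('./data/imgs/10001.npy')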
def main():
parser = argparse.ArgumentParser(description='train pca and dl features')
parser.add_argument('--data-path', default='./data/raw',
help='path to original data, default ./data/raw')
parser.add_argument('--imgs-path', default='./data/imgs',
help='path to resaved images, default ./data/imgs')
parser.add_argument('--path-to-save', default='./data/features',
help='path to save features, default ./data/features')
parser.add_argument('--path-to-save-model', default='./models/pca',
help='path to save models, default ./models/pca')
args = parser.parse_args()
data_path = args.data_path
imgs_path = args.imgs_path
path_to_save = args.path_to_save
path_to_save_model = args.path_to_save_model
for _path in [path_to_save, path_to_save_model,
os.path.join(path_to_save, '100dl_feats'),
os.path.join(path_to_save, '200pca_feats')]:
if not os.path.exists(_path):
os.makedirs(_path)
loading = pd.read_csv(os.path.join(data_path, 'loading.csv'), index_col = ['Id'])
    # create paths to all images
img_path = pd.DataFrame(index=loading.index, columns=['path'])
for index in img_path.index:
path = str(index) + '.npy'
img_path.loc[index, 'path'] = os.path.join(imgs_path, path)
    # start training and inference of the PCA features
print('PCA started. ~13 hours')
for k in range(0, 6):
##fit
pca = IncrementalPCA(n_components=200)
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 200 == 0:
batch = np.array(batch)
pca.partial_fit(batch)
del batch
gc.collect()
batch = []
##save pca
_p = os.path.join(path_to_save_model, f'200pca_3d_k{k}.pickle')
with open(_p, 'wb') as f:
pickle.dump(pca, f)
##transform
res = []
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 200 == 0:
batch = np.array(batch)
res.append(pca.transform(batch))
del batch
gc.collect()
batch = []
lb = len(batch)
if lb > 0:
batch = np.array(batch)
if lb == 1:
res.append(pca.transform(batch.reshape(1, -1)))
else:
res.append(pca.transform(batch))
##save df
res = np.array(res)
df_res = pd.DataFrame(np.vstack(res), index=loading.index, columns=[f'200PCA_k{k}_' + str(i) for i in range(200)])
_p = os.path.join(path_to_save, f'200pca_feats/200pca_3d_k{k}.csv')
df_res.to_csv(_p)
print('Dictionary learning started. ~47 hours')
n_k = 100
for k in range(0, 6):
##fit
        pca = MiniBatchDictionaryLearning(n_components=n_k, random_state=0, n_iter=10, batch_size=n_k)  # note: the name "pca" is reused here for the dictionary-learning model
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 100 == 0:
batch = np.array(batch)
pca.partial_fit(batch)
del batch
gc.collect()
batch = []
##save pca
_p = os.path.join(path_to_save_model, f'dl_3d_k{k}.pickle')
with open(_p, 'wb') as f:
pickle.dump(pca, f)
##transform
res = []
batch = []
for n, i in enumerate(tqdm(img_path.values)):
f = ImageLoader().transform(i[0])
f = f[k*10:(k+1)*10].flatten()
batch.append(f)
if (n + 1) % 100 == 0:
batch = np.array(batch)
res.append(pca.transform(batch))
del batch
gc.collect()
batch = []
lb = len(batch)
if lb > 0:
batch = np.array(batch)
if lb == 1:
res.append(pca.transform(batch.reshape(1, -1)))
else:
res.append(pca.transform(batch))
##save df
res = np.array(res)
df_res = pd.DataFrame(np.vstack(res), index=loading.index, columns=[f'dl_k{k}_' + str(i) for i in range(n_k)])
_p = os.path.join(path_to_save, f'100dl_feats/dl_3d_k{k}.csv')
df_res.to_csv(_p)
#resave results
_p = os.path.join(path_to_save, '100dl_feats/dl_3d_k0.csv')
data_pca = pd.read_csv(_p)
for i in range(1, 6):
_p = os.path.join(path_to_save, '100dl_feats/dl_3d_k{}.csv'.format(i))
        part = pd.read_csv(_p)
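        # Assumed continuation (snippet truncated here): `part` would be merged
        # into `data_pca`, e.g. by concatenating the per-k feature columns.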
"""
<NAME>
<EMAIL>
<EMAIL>
"""
"""
This is used to generate images containing data from a Slifer Lab NMR cooldown.
The NMR analysis toolsuite produces a file called "global_analysis.csv" which this program needs
in tandem with the raw DAQ .csv to form an image sequence that captures the cooldown datastream.
"""
import pandas, os, numpy, multiprocessing, time, matplotlib, sys
from matplotlib import pyplot as plt
sys.path.insert(1, '..')
import variablenames
# Sept 14 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-14-2020/video/"
flist = ['data_record_9-14-2020_abridged.csv', 'data_record_9-15-2020_abridged.csv']
daqdatafile = ["../datasets/sep_2020/rawdata/"+i for i in flist]
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+"saveme_9_14.csv"
spline_df_location = rootdir+"spline_df_saved_9_14.csv"
rawsig_ym, rawsig_YM = -4, 4
fitsub_xm, fitsub_XM = 32.4,33.4
fitsub_ym, fitsub_YM= -.2, 1.5
poor_fit_ym, poor_fit_YM = -1.6,-.8
"""
# Dec 3 2020 Data
rootdir = "../datasets/dec_2020/data_record_12-3-2020/"
daqdatafile = '../datasets/dec_2020/rawdata/data_record_12-3-2020_abriged.csv'
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_3_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.55,33.45
fitsub_ym, fitsub_YM = -.4, .2
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -1,1
# Dec 4 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-4-2020/analysis/polarization/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_4_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.55,33.45
fitsub_ym, fitsub_YM = -.075, .075
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -1,1
"""
# Dec 7 2020
"""
rootdir = "../datasets/dec_2020/data_record_12-7-2020/analysis/Enhanced/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_7_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.85,32.55
fitsub_ym, fitsub_YM = -.3, .2
rawsig_ym, rawsig_YM = -2, 2
poor_fit_ym, poor_fit_YM = -.6,-.1
"""
# Dec 8 2020
"""rootdir = "../datasets/dec_2020/data_record_12-8-2020/analysis/enhanced/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_8_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.45,212.94
fitsub_ym, fitsub_YM = -.01, .02
rawsig_ym, rawsig_YM = -.3, .3
poor_fit_ym, poor_fit_YM = -.018,-.01
"""
# Dec 9 2020
"""
rootdir = "../datasets/dec_2020/data_record_12-9-2020/video/"
csvdirectory = rootdir+"graph_data/"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_9_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.45,212.94
fitsub_ym, fitsub_YM = -.01, .02
rawsig_ym, rawsig_YM = -.3, .3
poor_fit_ym, poor_fit_YM = -.018,-.01
poor_fit_ym, poor_fit_YM = -.005,-.03
"""
# Dec 10 2020 data
"""
csvdirectory = "../datasets/dec_2020/data_record_12-10-2020/video_analysis/graph_data/"
globalcsv2 = "../datasets/dec_2020/data_record_12-10-2020/video_analysis/global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = 'datasets/2020_12_10/saveme_12_10_20.csv'
spline_df_location = 'datasets/2020_12_10/spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 11 2020 data
"""
rootdir = "../datasets/dec_2020/data_record_12-11-2020/video/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_9_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Sept 12 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-12-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-12-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 11 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-11-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-11-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-10-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2_redo.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 12 2020 data #FOR TE FIXING
"""
rootdir = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-12-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/912_536pTE/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-12-2020_old_analysis/TE/912_536pTE/global_analysis_with_extra_stuff.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_12_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 11 2020 data
"""
rootdir = "../datasets/sep_2020/data_record_9-11-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_11_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 13 2020 Dat
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = "../datasets/sep_2020/rawdata/data_record_9-13-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 15 2020 Data #enhanced
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-15-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-14-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-15-2020/video/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-15-2020/video/global_analysis_long_fixed.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4
"""
# Sept 15 2020 Data #te
"""
rootdir = "../datasets/sep_2020/data_record_9-13-2020/video/"
daqdatafile = ["../datasets/sep_2020/rawdata/data_record_9-15-2020_abridged.csv", "../datasets/sep_2020/rawdata/data_record_9-14-2020_abridged.csv"]
csvdirectory = rootdir+"graph_data/"
globalcsv = "../datasets/sep_2020/data_record_9-14-2020_old_analysis/700pte/7p_lab/global_analysis.csv"
globalcsv2 = "../datasets/sep_2020/data_record_9-14-2020_old_analysis/700pte/7p_lab/global_analysis_long_fixed.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_9_13_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 31.95,32.85
fitsub_ym, fitsub_YM = -.05, .5
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,.4"""
# Dec 10 2020 Data null
"""
rootdir = "../datasets/dec_2020/data_record_12-10-2020/Complete_analysis/null_pure/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-10-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_10_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 10 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-10-2020/Complete_analysis/enhanced_pure/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-10-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Third order Polynomial 0 Subtraction'
karlmethod = rootdir+'saveme_12_10_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 11 2020 Data
"""
rootdir = "../datasets/dec_2020/data_record_12-11-2020/video/"
daqdatafile = "../datasets/dec_2020/rawdata/data_record_12-11-2020_abridged.csv"
csvdirectory = rootdir+"graph_data/"
globalcsv = rootdir+"global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_11_20.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 32.6,33.5
fitsub_ym, fitsub_YM = -.2, .25
rawsig_ym, rawsig_YM = -4, 3.5
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 19 2019 Data
"""
rootdir = "../datasets/dec_2019/vme_data/data_record_12-19-2019/"
daqdatafile = rootdir+"../rawdata/data_record_12-19-2019_abridged.csv"
csvdirectory = rootdir+"Results/enhanced/graph_data/"
globalcsv = rootdir+"enhanced_global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_19_19.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 212.7,213.15
fitsub_ym, fitsub_YM = -.02, .04
rawsig_ym, rawsig_YM = -.2, .2
poor_fit_ym, poor_fit_YM = -.5,0
"""
# Dec 21 2019 Data
rootdir = "../datasets/dec_2019/vme_data/data_record_12-21-2019/"
daqdatafile = rootdir+"../rawdata/data_record_12-21-2019_abridged.csv"
csvdirectory = rootdir+"results/enhanced/graph_data/"
globalcsv = rootdir+"enhanced_global_analysis.csv"
globalcsv2 = rootdir+"global_analysis_2.csv"
yfitsub = 'Fit 1 Subtraction'
karlmethod = rootdir+'saveme_12_19_19.csv'
spline_df_location = rootdir+'spline_df.csv'
fitsub_xm, fitsub_XM = 211.7,212.5
fitsub_ym, fitsub_YM = -.02, .06
rawsig_ym, rawsig_YM = -.2, .2
poor_fit_ym, poor_fit_YM = -.5,0
dump = "../dump3/"
thermistor1 ="CCS.F11 (K)"
thermistor2 ="CCS.F10 (K)"
thermistor3 ="CCX.T1 (K)"
thermistor4 ="CX.T2 (K)"
NMR_Variable = "Diode Tune (V)"
NMR_Tune = 'Phase Tune (V)'
NMR_Performance_Metric = NMR_Variable#'IFOFF (V)'
columns_to_absolutely_save = [thermistor1, NMR_Tune, NMR_Variable, thermistor2,
thermistor3, "UCA Voltage (V)", "Mmwaves Frequency (GHz)",
thermistor4]
raw = "Potential (V)"
x = "MHz"
bl = "BL Potential (V)"
def forkitindexer(filelist):
"""
    Return a list of index pairs that divide the passed
    list into nearly equal slices
"""
p = int(8*multiprocessing.cpu_count()/10)
lenset = len(filelist)
modulus = int(lenset%p)
floordiv = int(lenset/p)
slicer = [[floordiv*i, floordiv*(i+1)] for i in range(p-1)]
slicer.append([floordiv*(p-1), p*floordiv+int(modulus)-1])
return slicer
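# Illustration (actual numbers depend on cpu_count()): with 100 files and a CPU
# count of 10, p = 8, so forkitindexer(files) yields the index pairs
# [[0, 12], [12, 24], ..., [72, 84], [84, 99]], which plotter() below consumes
# as its (start, finish) `indexes` argument.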
def plotter(files, indexes, times, ga_fixed, id_num, deltas, timesteps, deltastime, tkbackend):
deltasx = 'time'
deltasy = 'sum'
if tkbackend == 'on':
pass
elif tkbackend == 'off':
matplotlib.use('Agg')
s,f = indexes
todo = files[s:f]
timedeltas = []
for i, val in enumerate(todo):
t1 = time.time()
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(16, 8), constrained_layout=True)
fig.suptitle(str(times[s+i]))
with open(csvdirectory+val, 'r') as f:
df = pandas.read_csv(f)
ss = ga_fixed.loc[times[s+i], 'sigstart']
sf = ga_fixed.loc[times[s+i], 'sigfinish']
signal_removed_df = df[(df[x]>ss) & (df[x]<sf)]
ax[0,0].scatter(df[x], df[yfitsub], label='Fit Subtracted Signal', color='blue')
ax[0,0].scatter(signal_removed_df[x], signal_removed_df[yfitsub], label='User Selected Region', color='red')
ax[0,0].legend(loc='best')
ax[0,0].set_title("Fit Subtracted Signal")
ax[0,0].set_ylabel('Volts (V)')
ax[0,0].set_xlabel('Frequency (MHz)')
ax[0,0].set_ylim(fitsub_ym, fitsub_YM)
ax[0,1].set_title('Temperature')
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor2], color='red', label=thermistor2)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor3], color='orange', label=thermistor3)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor1], color='green', label=thermistor1)
ax[0,1].scatter(ga_fixed.index.tolist(), ga_fixed[thermistor4], color='blue', label=thermistor4)
ax[0,1].set_ylim(-.5, 7)
ax[0,1].set_ylabel('Kelvin (K)')
ax[0,1].set_xlabel('Time')
ax[1,0].set_title("Raw Sweeps")
ax[1,0].scatter(df[x], df[bl], label='Baseline', color='blue')
ax[1,0].scatter(df[x], df[raw], label =''.join(list(val)[:-4]), color = 'red')
ax[1,0].set_ylim(rawsig_ym, rawsig_YM)
ax[1,0].set_ylabel('Volt')
ax[1,0].set_xlabel('Frequency (MHz)')
ax[1,1].scatter(ga_fixed.index.tolist(), ga_fixed['data_area'], color='green', label='Enhanced Data Area')
ax[1,1].set_title("Data Area")
#ax[1,1].set_ylim(-.025,.05)
ax[1,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], 'data_area'], color='magenta', label='Current sweep')
ax[1,1].set_ylabel('Volt-Area')
ax[1,1].set_xlabel('Time')
ax[0,2].scatter(ga_fixed.index.tolist(), ga_fixed[NMR_Performance_Metric], label=NMR_Performance_Metric)
ax[0,2].scatter(timesteps[s+i], ga_fixed.loc[times[s+i],NMR_Performance_Metric], color='magenta', label="Current Sweep")
ax[0,2].grid(True)
ax[0,2].legend(loc='best')
ax[0,2].set_title("VME & Microwave Stuff")
ax[0,2].set_ylabel('Volts (V)')
ax[0,2].set_xlabel('Time')
#ax[1,2].set_ylim(poor_fit_ym, poor_fit_YM)
ax[1,2].scatter(deltastime, deltas[deltasy], label="Signal-Wing avg value")
if timesteps[s+i] in deltastime:
ax[1,2].scatter(timesteps[s+i], deltas.loc[times[s+i], deltasy], color='magenta', label="Current Sweep")
ax[1,2].grid(True)
ax[1,2].legend(loc='best')
ax[1,2].set_title("Poor-fit indicator prototype")
ax[1,2].set_xlabel('Time')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor2], color='magenta', label="Current Sweep")
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor3], color='blue')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor1], color='magenta')
ax[0,1].scatter(timesteps[s+i], ga_fixed.loc[times[s+i], thermistor4], color='magenta')
ax[0,0].grid(True)
ax[1,0].grid(True)
ax[1,1].grid(True)
ax[0,1].grid(True)
ax[0,1].legend(loc='best')
ax[0,0].legend(loc='best')
ax[1,0].legend(loc='best')
ax[1,1].legend(loc='best')
plt.savefig(dump+str("{0:05d}".format(s+i)))
plt.clf()
plt.close('all')
t2 = time.time()
timedeltas.append(t2-t1)
print('ID:', id_num, ":", (i+1), "of", len(todo), '['+str(round((i+1)*100/len(todo),4))+'%]', "ETA: ", round((len(todo)-(i+1))*numpy.mean(timedeltas),1), 's')
def get_csv_files():
csvs = []
for root, dirs, files in os.walk(csvdirectory):
for f in files:
if f.endswith('.csv'):
csvs.append(''.join(list(f)[:-4])) # removes suffixes
return csvs
def get_global_analysis():
with open(globalcsv2, 'r') as f:
df = pandas.read_csv(f)
name = 'name'
# Set the indexer as the user-defined name of the analyzed instance
# which reflects the file names gotten in the function get_csv_files()
dffixed = df.set_index(name)
return dffixed
def sync_timestamps_with_csv_filenames(dffixed, csvs):
timesteps = []
keys = []
for i, index in enumerate(csvs):
try:
timesteps.append(dffixed.loc[index, 'time'])
keys.append(index+'.csv')
except KeyError as e:
print("Key error", e, "file exists, but no entry in global analysis.")
continue
corrected_DF = pandas.DataFrame(dict(zip(['keys', 'time'],[keys, timesteps])))
sorted_df = corrected_DF.sort_values(by='time')
return sorted_df
def cutter(ga_csv, sorted_df, tolerance):
import cutter as cutter2
minn = -.3
maxx=-.23
deltasx = 'time'
deltasy = 'sum'
deltasmin = 'spline min'
deltasmax = 'spline max'
    edited = input("do you need to subset (CUT) the data? [Y/N]: ")
edited = True if edited.upper() == 'Y' else False
if edited:
try:
with open(spline_df_location, 'r') as f:
deltas = pandas.read_csv(f)
except:
deltas = cutter2.main(tolerance=tolerance, neededpath=karlmethod, global_analysis=globalcsv2)
with open(spline_df_location, 'w') as f:
deltas.to_csv(f)
else:
with open(karlmethod, 'r') as f:
deltas = pandas.read_csv(f)
deltas = deltas.sort_values(by=deltasx)
deltas[deltasx] = pandas.to_datetime(deltas[deltasx],format="%Y-%m-%d %H:%M:%S")
ga_csv['time'] = | pandas.to_datetime(ga_csv['time'], format="%Y-%m-%d %H:%M:%S") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 11:30:55 2019
@author: <NAME>
"""
# import the necessary packages
import cv2
from PIL import Image
import numpy as np
import datetime
import os
import pandas as pd
#%% Set the output file location
run_data = datetime.datetime.now().strftime("%Y_%m_%d")
result_path=r"./Fig4_{}/".format(run_data)
if not os.path.exists(result_path):
os.makedirs(result_path)
#%% Read Target Folders' Paths
labels=['neutrophyl','lymphocyte']
#base_path = r'E:\DeepLearning\Mikami\Generate\White Cell'
base_path = r'./Whitecell'
file_list_lym = []
file_list_neu = []
for root, dirs, files in os.walk(base_path):
for file in files:
if file.endswith(".tif"):
filename = os.path.join(root, file)
file_size = os.path.getsize(filename)
category_name = os.path.basename(root)
if category_name == labels[0]:
file_list_neu.append(filename)
else :
file_list_lym.append(filename)
#%% Read image files and put in a list
data_number = 2500
label='lymphocyte' # 'lymphocyte' or 'neutrophyl'
data_of_lym_name = []
data_of_lym_nucleus = []
data_of_lym_box = []
data_of_lym_ratio = []
for i, filename in enumerate(file_list_lym[:data_number]):
threadhold=140
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<threadhold]=0
image = imarray[:,:,1]
# Find the enclosing box and draw it
x, y, w, h = cv2.boundingRect(imarray[:,:,1])
cv2.rectangle(imarray, (x,y), (x+w,y+h), (0,0,255), 1)
imarray[:,:,0]=0
    # Save the results as png files
# cv2.imwrite('{}\Result_of_{}_{}_all.png'.format(label,label,i), imarray)
# cv2.imwrite('{}\Result_of_{}_{}_box.png'.format(label,label,i), imarray[y-5:y+h+5,x-5:x+w+5,:])
# Calculate the enclosing box area
box_area=w*h
if box_area ==0:
box_area = 1
    # Read the image file again (to be safe) and calculate the nucleus area
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<threadhold]=0
image = imarray[:,:,1]
cell_area=np.count_nonzero(imarray[y-5:y+h+5,x-5:x+w+5,1])
    # Store the intermediate results
data_of_lym_nucleus.append(cell_area)
data_of_lym_box.append(box_area)
data_of_lym_ratio.append(cell_area/box_area)
label='neutrophyl' # 'lymphocyte' or 'neutrophyl'
data_of_neu_name = []
data_of_neu_nucleus = []
data_of_neu_box = []
data_of_neu_ratio =[]
for i, filename in enumerate(file_list_neu[:data_number]):
threadhold=140
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<threadhold]=0
image = imarray[:,:,1]
# Find the enclosing box and draw it
x, y, w, h = cv2.boundingRect(imarray[:,:,1])
cv2.rectangle(imarray, (x,y), (x+w,y+h), (0,0,255), 1)
imarray[:,:,0]=0
    # Save the results as png files
# cv2.imwrite('{}\Result_of_{}_{}_all.png'.format(label,label,i), imarray)
# cv2.imwrite('{}\Result_of_{}_{}_box.png'.format(label,label,i), imarray[y-5:y+h+5,x-5:x+w+5,:])
# Calculate the enclosing box area
box_area=w*h
if box_area ==0:
box_area =1
    # Read the image file again (to be safe) and calculate the nucleus area
im = Image.open(filename)
imarray = np.array(im)
imarray[imarray<threadhold]=0
image = imarray[:,:,1]
cell_area=np.count_nonzero(imarray[y-5:y+h+5,x-5:x+w+5,1])
    # Store the intermediate results
data_of_neu_nucleus.append(cell_area)
data_of_neu_box.append(box_area)
data_of_neu_ratio.append(cell_area/box_area)
#%% Remove zeros
data_of_lym_ratio=np.asarray(data_of_lym_ratio)
data_of_neu_ratio=np.asarray(data_of_neu_ratio)
data_of_lym_ratio=data_of_lym_ratio[data_of_lym_ratio>0]
data_of_neu_ratio=data_of_neu_ratio[data_of_neu_ratio>0]
#%% Save the Results
data = {'lymphocyte':data_of_lym_ratio}
df1 = pd.DataFrame(data)
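# Assumed continuation (snippet truncated here): the neutrophil ratios would be
# tabulated the same way and both tables written out, e.g.
#     df2 = pd.DataFrame({'neutrophyl': data_of_neu_ratio})
#     df1.to_csv(os.path.join(result_path, 'lymphocyte_ratio.csv'), index=False)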
#%%
import time
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from graspologic.plot import pairplot
from graspologic.utils import get_lcc, pass_to_ranks, to_laplace
from sparse_decomposition import SparseMatrixApproximation
from src.visualization import CLASS_COLOR_DICT
import matplotlib as mpl
sns.set_context("talk")
mpl.rcParams["axes.spines.top"] = False
mpl.rcParams["axes.spines.right"] = False
fig_dir = Path("sparse_new_basis/experiments/maggot/figs")
def stashfig(name, dpi=300, fmt="png", pad_inches=0.5, facecolor="w", **kws):
plt.savefig(
fig_dir / name,
dpi=dpi,
fmt=fmt,
pad_inches=pad_inches,
facecolor=facecolor,
**kws,
)
data_dir = Path("sparse_new_basis/data/maggot")
g = nx.read_weighted_edgelist(
data_dir / "G.edgelist", create_using=nx.DiGraph, nodetype=int
)
meta = pd.read_csv(data_dir / "meta_data.csv", index_col=0)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import simplejson as json
import six
import os
from pandas.api.types import is_integer_dtype
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
import h5py
from .core import (
get,
region_to_offset,
region_to_extent,
RangeSelector1D,
RangeSelector2D,
CSRReader,
query_rect,
)
from .util import parse_cooler_uri, parse_region, open_hdf5, closing_hdf5
from .fileops import list_coolers
__all__ = ["Cooler", "annotate"]
# The 4DN data portal and hic2cool store these weight vectors in divisive form
_4DN_DIVISIVE_WEIGHTS = {"KR", "VC", "VC_SQRT"}
class Cooler(object):
"""
A convenient interface to a cooler data collection.
Parameters
----------
store : str, :py:class:`h5py.File` or :py:class:`h5py.Group`
Path to a cooler file, URI string, or open handle to the root HDF5
group of a cooler data collection.
root : str, optional [deprecated]
HDF5 Group path to root of cooler group if ``store`` is a file.
This option is deprecated. Instead, use a URI string of the form
:file:`<file_path>::<group_path>`.
kwargs : optional
Options to be passed to :py:class:`h5py.File()` upon every access.
By default, the file is opened with the default driver and mode='r'.
Notes
-----
If ``store`` is a file path, the file will be opened temporarily in
when performing operations. This allows :py:class:`Cooler` objects to be
serialized for multiprocess and distributed computations.
Metadata is accessible as a dictionary through the :py:attr:`info`
property.
Table selectors, created using :py:meth:`chroms`, :py:meth:`bins`, and
:py:meth:`pixels`, perform range queries over table rows,
returning :py:class:`pd.DataFrame` and :py:class:`pd.Series`.
A matrix selector, created using :py:meth:`matrix`, performs 2D matrix
range queries, returning :py:class:`numpy.ndarray` or
:py:class:`scipy.sparse.coo_matrix`.
"""
def __init__(self, store, root=None, **kwargs):
if isinstance(store, six.string_types):
if root is None:
self.filename, self.root = parse_cooler_uri(store)
elif h5py.is_hdf5(store):
with open_hdf5(store, **kwargs) as h5:
self.filename = h5.file.filename
self.root = root
else:
raise ValueError("Not a valid path to a Cooler file")
self.uri = self.filename + "::" + self.root
self.store = self.filename
self.open_kws = kwargs
else:
# Assume an open HDF5 handle, ignore open_kws
self.filename = store.file.filename
self.root = store.name
self.uri = self.filename + "::" + self.root
self.store = store.file
self.open_kws = {}
self._refresh()
def _refresh(self):
try:
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
_ct = chroms(grp)
_ct["name"] = _ct["name"].astype(object)
self._chromsizes = _ct.set_index("name")["length"]
self._chromids = dict(zip(_ct["name"], range(len(_ct))))
self._info = info(grp)
mode = self._info.get("storage-mode", u"symmetric-upper")
self._is_symm_upper = mode == u"symmetric-upper"
except KeyError:
err_msg = "No cooler found at: {}.".format(self.store)
listing = list_coolers(self.store)
if len(listing):
err_msg += (
" Coolers found in {}. ".format(listing)
+ "Use '::' to specify a group path"
)
raise KeyError(err_msg)
def _load_dset(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return grp[path][:]
def _load_attrs(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return dict(grp[path].attrs)
def open(self, mode="r", **kwargs):
""" Open the HDF5 group containing the Cooler with :py:mod:`h5py`
Functions as a context manager. Any ``open_kws`` passed during
construction are ignored.
Parameters
----------
mode : str, optional [default: 'r']
* ``'r'`` (readonly)
* ``'r+'`` or ``'a'`` (read/write)
Notes
-----
For other parameters, see :py:class:`h5py.File`.
"""
grp = h5py.File(self.filename, mode, **kwargs)[self.root]
return closing_hdf5(grp)
@property
def storage_mode(self):
"""Indicates whether ordinary sparse matrix encoding is used
(``"square"``) or whether a symmetric matrix is encoded by storing only
the upper triangular elements (``"symmetric-upper"``).
"""
return self._info.get("storage-mode", u"symmetric-upper")
@property
def binsize(self):
""" Resolution in base pairs if uniform else None """
return self._info["bin-size"]
@property
def chromsizes(self):
""" Ordered mapping of reference sequences to their lengths in bp """
return self._chromsizes
@property
def chromnames(self):
""" List of reference sequence names """
return list(self._chromsizes.index)
def offset(self, region):
""" Bin ID containing the left end of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
int
Examples
--------
>>> c.offset('chr3') # doctest: +SKIP
1311
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_offset(
grp, self._chromids, parse_region(region, self._chromsizes)
)
def extent(self, region):
""" Bin IDs containing the left and right ends of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
2-tuple of ints
Examples
--------
>>> c.extent('chr3') # doctest: +SKIP
(1311, 2131)
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
@property
def info(self):
""" File information and metadata
Returns
-------
dict
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return info(grp)
@property
def shape(self):
return (self._info["nbins"],) * 2
def chroms(self, **kwargs):
""" Chromosome table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return chroms(grp, lo, hi, fields, **kwargs)
return RangeSelector1D(None, _slice, None, self._info["nchroms"])
def bins(self, **kwargs):
""" Bin table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return bins(grp, lo, hi, fields, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
return RangeSelector1D(None, _slice, _fetch, self._info["nbins"])
def pixels(self, join=False, **kwargs):
""" Pixel table selector
Parameters
----------
join : bool, optional
Whether to expand bin ID columns into chrom, start, and end
columns. Default is ``False``.
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return pixels(grp, lo, hi, fields, join, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
i0, i1 = region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
lo = grp["indexes"]["bin1_offset"][i0]
hi = grp["indexes"]["bin1_offset"][i1]
return lo, hi
return RangeSelector1D(None, _slice, _fetch, self._info["nnz"])
def matrix(
self,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=False,
ignore_index=True,
divisive_weights=None,
max_chunk=500000000,
):
""" Contact matrix selector
Parameters
----------
field : str, optional
Which column of the pixel table to fill the matrix with. By
default, the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing
the desired balancing weights. Set to False to return untransformed
counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID
columns into (chrom, start, end). Has no effect when requesting a
rectangular matrix. Default is True.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the
pixel IDs to improve performance. Default is True.
divisive_weights : bool, optional
Force balancing weights to be interpreted as divisive (True) or
multiplicative (False). Weights are always assumed to be
multiplicative by default unless named KR, VC or SQRT_VC, in which
case they are assumed to be divisive by default.
Returns
-------
Matrix selector
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If
``as_pixels=False``, those missing non-zero elements will
automatically be filled in.
"""
if balance in _4DN_DIVISIVE_WEIGHTS and divisive_weights is None:
divisive_weights = True
def _slice(field, i0, i1, j0, j1):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return matrix(
grp,
i0,
i1,
j0,
j1,
field,
balance,
sparse,
as_pixels,
join,
ignore_index,
divisive_weights,
max_chunk,
self._is_symm_upper,
)
def _fetch(region, region2=None):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
if region2 is None:
region2 = region
region1 = parse_region(region, self._chromsizes)
region2 = parse_region(region2, self._chromsizes)
i0, i1 = region_to_extent(grp, self._chromids, region1)
j0, j1 = region_to_extent(grp, self._chromids, region2)
return i0, i1, j0, j1
return RangeSelector2D(field, _slice, _fetch, (self._info["nbins"],) * 2)
def __repr__(self):
if isinstance(self.store, six.string_types):
filename = os.path.basename(self.store)
container = "{}::{}".format(filename, self.root)
else:
container = repr(self.store)
return '<Cooler "{}">'.format(container)
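# --- Added usage sketch (not in the original source) --------------------------
# Assuming the enclosing class is constructed from a .cool file path (the file
# name below is a placeholder), the selectors defined above are used like:
#
#     clr = Cooler("example.cool")
#     chrom_table = clr.chroms()[:]                  # whole chromosome table
#     first_bins = clr.bins()[:10]                   # first ten bins
#     pix = clr.pixels(join=True).fetch("chr1")      # pixels with bin1 on chr1
#     mat = clr.matrix(balance=True).fetch("chr1")   # dense balanced matrix
# ------------------------------------------------------------------------------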
def info(h5):
"""
File and user metadata dict.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
Returns
-------
dict
"""
d = {}
for k, v in h5.attrs.items():
if isinstance(v, six.string_types):
try:
v = json.loads(v)
except ValueError:
pass
d[k] = v
return d
def chroms(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the chromosomes/scaffolds/contigs used.
They appear in the same order they occur in the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["name", "length"])
.append(pd.Index(h5["chroms"].keys()))
.drop_duplicates()
)
return get(h5["chroms"], lo, hi, fields, **kwargs)
def bins(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the genomic bins that make up the axes of the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["chrom", "start", "end"])
.append(pd.Index(h5["bins"].keys()))
.drop_duplicates()
)
# If convert_enum is not explicitly set to False, chrom IDs will get
# converted to categorical chromosome names, provided the ENUM header
# exists in bins/chrom. Otherwise, they will return as integers.
out = get(h5["bins"], lo, hi, fields, **kwargs)
# Handle the case where the ENUM header doesn't exist but we want to
# convert integer chrom IDs to categorical chromosome names.
if "chrom" in fields:
convert_enum = kwargs.get("convert_enum", True)
if isinstance(fields, six.string_types):
chrom_col = out
else:
chrom_col = out["chrom"]
if is_integer_dtype(chrom_col.dtype) and convert_enum:
chromnames = chroms(h5, fields="name")
chrom_col = | pd.Categorical.from_codes(chrom_col, chromnames, ordered=True) | pandas.Categorical.from_codes |
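# --- Added illustrative sketch (not part of the original module) --------------
# What the from_codes call above does: map integer chromosome IDs onto ordered
# categorical names. The IDs and names below are hypothetical.
_demo_codes = [0, 0, 1, 2]
_demo_names = ["chr1", "chr2", "chrX"]
_demo_cat = pd.Categorical.from_codes(_demo_codes, _demo_names, ordered=True)
print(_demo_cat)  # [chr1, chr1, chr2, chrX]; Categories: chr1 < chr2 < chrX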
import pandas as pd
import numpy as np
import sklearn
import warnings
import sys
# sys.path.append('Feature Comparison/Basic.py')
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
Basic_Feature_path = 'Feature Comparison/Basic_FeatureSet.npy'
Botnet_Feature_path = 'Feature Comparison/Botnet_FeatureSet.npy'
Blacklist_Feature_path = 'Feature Comparison/Blacklist_FeatureSet.npy'
Whois_Feature_path = 'Feature Comparison/Whois_FeatureSet.npy'
Hostbased_Feature_path = 'Feature Comparison/Host_based_FeatureSet.npy'
Lexical_Feature_path = 'Feature Comparison/Lexical_FeatureSet.npy'
Full_ex_wb_path = 'Feature Comparison/Full_except_WB.npy'
warnings.filterwarnings("ignore", category=FutureWarning, module="sklearn", lineno=196)
warnings.filterwarnings("ignore", category=FutureWarning, module="sklearn", lineno=433)
warnings.filterwarnings("ignore", category=RuntimeWarning, module="sklearn", lineno=436)
warnings.filterwarnings("ignore", category=RuntimeWarning, module="sklearn", lineno=438)
| pd.set_option('display.max_columns', 10000) | pandas.set_option |
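# --- Added hedged sketch (not part of the original script) --------------------
# One plausible way the feature sets listed above could be scored with the
# imported estimators. It assumes each .npy file holds a 2-D array whose last
# column is the binary label -- that layout is an assumption, not something the
# original script states.
def evaluate_feature_set(path):
    data = np.load(path, allow_pickle=True)
    X, y = data[:, :-1], data[:, -1].astype(int)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)
    model = Pipeline([("scale", MinMaxScaler()), ("clf", LogisticRegression())])
    model.fit(X_train, y_train)
    preds = model.predict(X_test)
    print("accuracy:", accuracy_score(y_test, preds))
    print("confusion matrix:\n", confusion_matrix(y_test, preds))
    print("5-fold CV mean:", cross_val_score(model, X, y, cv=5).mean())
# Example call (hypothetical): evaluate_feature_set(Lexical_Feature_path)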
"""
All Features CV Analysis
"""
import sys
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import utils as u
#####################################################################################################
def run_module_2():
PATH = "./Metrics - 2/All Features CV Analysis (CC).pkl"
fontsize = 5.5
args = "--shortlist"
if args in sys.argv:
PATH = "./Metrics/Shortlisted Features CV Analysis.pkl"
fontsize = 12
with open(PATH, "rb") as fp:
params = pickle.load(fp)
df = pd.DataFrame()
for a in range(len(params)):
temp_df = | pd.DataFrame.from_dict(params[a]) | pandas.DataFrame.from_dict |
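# --- Added illustrative sketch (not part of the original script) --------------
# How a list of per-run metric dicts (like ``params`` above) can be stacked into
# one frame via from_dict + concat. The keys and values below are hypothetical.
_demo_params = [{"fold": [1], "accuracy": [0.91]}, {"fold": [2], "accuracy": [0.89]}]
_demo_df = pd.concat([pd.DataFrame.from_dict(p) for p in _demo_params],
                     ignore_index=True)
print(_demo_df)  # two rows: fold/accuracy per run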
import json
import pandas as pd
class Teaproduction:
def __init__(self):
pass
def production(self):
frame_f = self.production_1()
frame_e = self.production_2()
frame = pd.concat([frame_f , frame_e])
frame.columns = ['生產重量']
frame.index.name = '日期'
return frame
def production_1(self):
        # Process the 2003-2010 tea production figures; open the Excel files with pandas
year = ['2003','2004','2005','2006','2007','2008','2009','2010']
tea ={}
for i in range(2,10):
df = pd.read_excel('9{:d}年.xls'.format(i))
tea[year[i-2]]=float(df['Unnamed: 6'][8])
frame = pd.DataFrame.from_dict(tea, orient='index')
return frame
def production_2(self):
        # Process the 2011-2019 tea production figures
with open('DataFileService.json', encoding='utf8') as jsonfile:
data = json.load(jsonfile)
frame = | pd.DataFrame(data) | pandas.DataFrame |
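# --- Added usage sketch (not part of the original snippet) --------------------
# Assuming the yearly Excel files and DataFileService.json referenced above sit
# in the working directory, the combined production table would be built like:
#
#     tea = Teaproduction()
#     production = tea.production()   # indexed by '日期' with a '生產重量' column
#     print(production.head())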
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3
from bokeh.plotting import Figure
from bokeh.models import (
CategoricalColorMapper,
HoverTool,
ColumnDataSource,
Panel,
FuncTickFormatter,
SingleIntervalTicker,
LinearAxis,
Legend,
)
from bokeh.models.widgets import (
CheckboxGroup,
Slider,
RangeSlider,
Tabs,
CheckboxButtonGroup,
TableColumn,
DataTable,
Select,
)
from bokeh.layouts import column, row, WidgetBox
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def vmstat_tab(db):
def make_dataset(vmstat_list):
newdf = vmstat[vmstat_list]
# Convert dataframe to column data source
return ColumnDataSource(newdf)
def make_plot(src):
# Blank plot with correct labels
p = Figure(
plot_width=1024,
plot_height=768,
x_axis_type="datetime",
title="vmstat",
output_backend="webgl",
)
cm = plt.get_cmap("gist_rainbow")
numlines = len(vmstat.columns)
mypal = [cm(1.0 * i / numlines) for i in range(numlines)]
mypal = list(map(lambda x: colors.rgb2hex(x), mypal))
col = 0
legenditems = []
for key in src.data.keys():
if key == "datetime":
continue
l = key + " "
col = col + 1
cline = p.line(
vmstat.index.values,
vmstat[key],
line_width=1,
alpha=0.8,
color=mypal[col],
)
legenditems += [(key, [cline])]
p.legend.click_policy = "hide"
legend = Legend(items=legenditems, location=(0, -30))
p.add_layout(legend, "right")
return p
def update(attr, old, new):
vmstats_to_plot = [vmstat_selection.labels[i] for i in vmstat_selection.active]
new_src = make_dataset(vmstats_to_plot)
plot = make_plot(new_src)
layout.children[1] = plot
# get data from DB, setup index
cur = db.cursor()
cur.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", ["vmstat"]
)
if len(cur.fetchall()) == 0:
return None
vmstat = pd.read_sql_query("select * from vmstat", db)
vmstat.index = | pd.to_datetime(vmstat["datetime"]) | pandas.to_datetime |
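# --- Added illustrative sketch (not part of the original module) --------------
# The read_sql_query / to_datetime pattern used above, shown on a throwaway
# in-memory database. The table layout here is hypothetical.
def _demo_vmstat_frame():
    demo_db = sqlite3.connect(":memory:")
    demo_db.execute("CREATE TABLE vmstat (datetime TEXT, r INT, b INT)")
    demo_db.execute("INSERT INTO vmstat VALUES ('2020-01-01 00:00:00', 1, 0)")
    demo = pd.read_sql_query("select * from vmstat", demo_db)
    demo.index = pd.to_datetime(demo["datetime"])
    return demo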
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-30
"""
import gc
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
import six, pdb
import pandas as pd
from pandas.io.json import json_normalize
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorBasicDerivation(object):
"""
    Basic derived (fundamental) factors
"""
def __init__(self):
__str__ = 'factor_basic_derivation'
self.name = '基础衍生'
self.factor_type1 = '基础衍生'
self.factor_type2 = '基础衍生'
self.description = '基础衍生类因子'
@staticmethod
def EBIT(tp_derivation, factor_derivation, dependencies=['total_profit', 'interest_expense', 'interest_income', 'financial_expense']):
"""
        :name: Earnings before interest and tax, EBIT (MRQ)
        :desc: [EBIT, back-solved] EBIT (MRQ) = total profit + interest expense - interest income
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
func = lambda x: (x[0] + x[1] - x[2]) if x[1] is not None and x[2] is not None else (x[0] + x[3] if x[3] is not None else None)
management['EBIT'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBIT']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
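    # --- Added worked example (not in the original source) --------------------
    # The EBIT fallback above on hypothetical numbers:
    #   row 1: total_profit=100, interest_expense=10, interest_income=4
    #          -> EBIT = 100 + 10 - 4 = 106
    #   row 2: interest columns missing, financial_expense=8
    #          -> EBIT = 100 + 8 = 108
    # Note that the ``is not None`` checks only catch genuine None values
    # (object-dtype frames); float NaN would slip past them.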
@staticmethod
def EBITDA(tp_derivation, factor_derivation, dependencies=['total_profit', 'income_tax'],
dependency=['EBIT']):
"""
        :name: After-tax EBIT (MRQ)
        :desc: After-tax EBIT (MRQ) = EBIT (back-solved) * (1 - tax rate if both income tax and total profit are > 0, otherwise 1), where tax rate = income tax / total profit
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: None if x[0] is None or x[1] is None or x[2] is None or x[1] == 0 else (x[0] * (1 - x[2] / x[1]) if x[1] > 0 and x[2] > 0 else x[0])
management['EBITDA'] = management[dependency].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBITDA']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def DepAndAmo(tp_derivation, factor_derivation, dependencies=['fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization']):
"""
        :name: Depreciation and amortization (MRQ)
        :desc: Depreciation of fixed assets + amortization of intangible assets + amortization of long-term deferred expenses
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['DepAndAmo'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['DepAndAmo']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFF(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'cash_equivalents',
'total_current_liability',
'shortterm_loan',
'shortterm_bonds_payable',
'non_current_liability_in_one_year',
'total_current_assets_pre',
'cash_equivalents_pre',
'total_current_liability_pre',
'shortterm_loan_pre',
'shortterm_bonds_payable_pre',
'non_current_liability_in_one_year_pre',
'fix_intan_other_asset_acqui_cash',
],
dependency=['EBITDA', 'DepAndAmo']):
"""
        :name: Free cash flow to the firm (MRQ)
        :desc: After-tax EBIT + depreciation and amortization - increase in working capital - capital expenditure, where capital expenditure is the cash paid for fixed, intangible and other long-term assets, and working capital = (current assets - cash) - (current liabilities - short-term loans - short-term bonds payable - long-term borrowings due within one year - bonds payable due within one year)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
        # The second working-capital term must use the *_pre columns and the last
        # term is the capex cash outflow (see the formula in the docstring).
        func = lambda x: (x[0] + x[1]
                          - (x[2] - x[3] - x[4] + x[5] + x[6] + x[7])
                          + (x[8] - x[9] - x[10] + x[11] + x[12] + x[13])
                          - x[14]) if all(v is not None for v in x) else None
management['FCFF'] = management[dependency].apply(func, axis=1)
management = management[['FCFF']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
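    # --- Added worked example (not in the original source) --------------------
    # With the docstring's definitions:
    #   WC   = (current assets - cash)
    #          - (current liabilities - short-term loans - short-term bonds
    #             payable - non-current liabilities due within one year)
    #   FCFF = after-tax EBIT (the ``EBITDA`` column above) + DepAndAmo
    #          - (WC - WC_previous) - cash paid for fixed/intangible/other assets
    # e.g. 120 + 30 - (500 - 470) - 60 = 60.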
@staticmethod
def FCFE(tp_derivation, factor_derivation, dependencies=['borrowing_repayment',
'cash_from_borrowing',
'cash_from_bonds_issue'],
dependency=['FCFF']):
"""
        :name: Free cash flow to equity (MRQ)
        :desc: Free cash flow to the firm - cash paid to repay debt + cash received from borrowings + cash received from bond issuance (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] + x[2] + x[3] if x[0] is not None and x[1] is not None and \
x[2] is not None and x[3] is not None else None
management['FCFE'] = management[dependency].apply(func, axis=1)
management = management[['FCFE']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NonRecGainLoss(tp_derivation, factor_derivation, dependencies=['np_parent_company_owners', 'np_cut']):
"""
        :name: Non-recurring gains and losses (MRQ)
        :desc: Net profit attributable to the parent company (MRQ) - net profit excluding non-recurring items (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NonRecGainLoss'] = management[dependencies].apply(func, axis=1)
management = management[['NonRecGainLoss']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetOptInc(tp_derivation, factor_derivation, sw_industry,
dependencies_er=['total_operating_revenue', 'total_operating_cost'],
dependencies_yh=['commission_income', 'net_profit', 'other_business_profits', 'operating_cost'],
dependencies_zq=['commission_income', 'net_profit', 'other_operating_revenue', 'operating_cost'],
dependencies_bx=['operating_revenue', 'operating_cost', 'fair_value_variable_income',
'investment_income', 'exchange_income']):
"""
        :name: Net operating income (MRQ)
        :desc: Under the new accounting standard (general enterprises): total operating revenue - total operating cost
        :unit: CNY
        :view_dimension: 10000
"""
industry2_set = ['430100', '370100', '410400', '450500', '640500', '510100', '620500', '610200', '330200',
'280400', '620400', '450200', '270500', '610300', '280300', '360300', '410100', '370400',
'280200', '730200', '710200', '720200', '640400', '270300', '110400', '220100', '240300',
'270400', '710100', '420100', '420500', '420400', '370600', '720100', '640200', '220400',
'330100', '630200', '610100', '370300', '410300', '220300', '640100', '490300', '450300',
'220200', '370200', '460200', '420200', '460100', '360100', '620300', '110500', '650300',
'420600', '460300', '720300', '270200', '630400', '410200', '280100', '210200', '420700',
'650200', '340300', '220600', '110300', '350100', '620100', '210300', '240200', '340400',
'240500', '360200', '270100', '230100', '370500', '110100', '460400', '110700', '110200',
'630300', '450400', '220500', '730100', '640300', '630100', '240400', '420800', '650100',
'350200', '620200', '210400', '420300', '110800', '360400', '650400', '110600', '460500',
'430200', '210100', '240100', '250100', '310300', '320200', '310400', '310200', '320100',
'260500', '250200', '450100', '470200', '260200', '260400', '260100', '440200', '470400',
'310100', '260300', '220700', '470300', '470100', '340100', '340200', '230200']
dependencies = list(set(dependencies_er + dependencies_yh + dependencies_bx + dependencies_zq))
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
management = pd.merge(management, sw_industry, how='outer', on='security_code').set_index('security_code')
if len(management) <= 0:
return None
management_tm = pd.DataFrame()
func = lambda x: x[0] + x[1] + x[2] - x[3] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None else None
        # Banks ['440100', '480100']
management_yh = management[management['industry_code2'].isin(['440100', '480100'])]
management_yh['NetOptInc'] = management_yh[dependencies_yh].apply(func, axis=1)
management_tm = management_tm.append(management_yh)
        # Securities firms ['440300', '490100']
management_zq = management[management['industry_code2'].isin(['440300', '490100'])]
management_zq['NetOptInc'] = management_zq[dependencies_zq].apply(func, axis=1)
management_tm = management_tm.append(management_zq)
func1 = lambda x: x[0] - x[1] - x[2] - x[3] - x[4] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None else None
        # Insurers ['440400', '490200']
management_bx = management[management['industry_code2'].isin(['440400', '490200'])]
management_bx['NetOptInc'] = management_bx[dependencies_bx].apply(func1, axis=1)
management_tm = management_tm.append(management_bx)
func2 = lambda x: None if x[0] is None else (x[0] if x[1] is None else x[0] - x[1])
management_er = management[management['industry_code2'].isin(industry2_set)]
management_er['NetOptInc'] = management_er[dependencies_er].apply(func2, axis=1)
management_tm = management_tm.append(management_er)
dependencies = dependencies + ['industry_code2']
management_tm = management_tm[['NetOptInc']]
factor_derivation = pd.merge(factor_derivation, management_tm, how='outer', on="security_code")
return factor_derivation
@staticmethod
def WorkingCap(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'total_current_liability']):
"""
        :name: Working capital (MRQ)
        :desc: Current assets (MRQ) - current liabilities (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['WorkingCap'] = management[dependencies].apply(func, axis=1)
management = management[['WorkingCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TangibleAssets(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets']):
"""
        :name: Tangible assets (MRQ)
        :desc: Shareholders' equity (excluding minority interests) - (intangible assets + development expenditure + goodwill + long-term deferred expenses + deferred tax assets)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - (x[1] + x[2] + x[3] + x[4] + x[5]) if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None and \
x[5] is not None else None
management['TangibleAssets'] = management[dependencies].apply(func, axis=1)
management = management[['TangibleAssets']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def RetainedEarnings(tp_derivation, factor_derivation, dependencies=['surplus_reserve_fund',
'retained_profit']):
"""
        :name: Retained earnings (MRQ)
        :desc: Surplus reserves (MRQ) + retained profit (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['RetainedEarnings'] = management[dependencies].apply(func, axis=1)
management = management[['RetainedEarnings']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeCurLb(tp_derivation, factor_derivation, dependencies=['bill_receivable',
'accounts_payable',
'advance_peceipts',
'salaries_payable',
'taxs_payable',
'accrued_expenses',
'other_payable',
'long_term_deferred_income',
'other_current_liability',
]):
"""
        :name: Interest-free current liabilities (MRQ)
        :desc: Interest-free current liabilities = notes receivable + accounts payable + advances from customers + employee compensation payable + taxes payable + other payables + accrued expenses + deferred income + other current liabilities
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8] if x[0] is not None or \
x[1] is not None or \
x[2] is not None or \
x[3] is not None or \
x[4] is not None or \
x[5] is not None or \
x[6] is not None or \
x[7] is not None or \
x[8] is not None else None
management['InterestFreeCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeNonCurLb(tp_derivation, factor_derivation, dependencies=['total_non_current_liability',
'longterm_loan',
'bonds_payable']):
"""
        :name: Interest-free non-current liabilities (MRQ)
        :desc: Total non-current liabilities - long-term loans - bonds payable
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['InterestFreeNonCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeNonCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestBearingLiabilities(tp_derivation, factor_derivation, dependencies=['total_liability'],
dependency=['InterestFreeCurLb', 'InterestFreeNonCurLb']):
"""
        :name: Interest-bearing liabilities (MRQ)
        :desc: Total liabilities - interest-free current liabilities - interest-free non-current liabilities (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependencies + dependency
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and \
x[1] is not None and \
x[2] is not None else None
management['InterestBearingLiabilities'] = management[dependency].apply(func, axis=1)
management = management[['InterestBearingLiabilities']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetDebt(tp_derivation, factor_derivation, dependencies=['cash_equivalents'],
dependency=['InterestBearingLiabilities']):
"""
        :name: Net debt (MRQ)
        :desc: Net debt = interest-bearing debt (MRQ) - cash and cash equivalents (MRQ), where interest-bearing debt = short-term loans + long-term debt due within one year + long-term loans + bonds payable + interest payable
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetDebt'] = management[dependency].apply(func, axis=1)
management = management[['NetDebt']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EquityPC(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners']):
"""
        :name: Equity attributable to owners of the parent company (MRQ)
        :desc: Equity attributable to owners of the parent company (MRQ)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
management = management.rename(columns={'equities_parent_company_owners': 'EquityPC'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalInvestedCap(tp_derivation, factor_derivation, dependencies=['total_owner_equities' ],
dependency=['InterestBearingLiabilities']):
"""
        :name: Total invested capital (MRQ)
        :desc: Shareholders' equity + (total liabilities - interest-free current liabilities - interest-free long-term liabilities)
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
dependency = dependency + dependencies
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['TotalInvestedCap'] = management[dependency].apply(func, axis=1)
management = management[['TotalInvestedCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalAssets(tp_derivation, factor_derivation, dependencies=['total_assets']):
"""
:name: 资产总计(MRQ)
:desc: 资产总计(MRQ) balance
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_assets': 'TotalAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalFixedAssets(tp_derivation, factor_derivation, dependencies=['total_fixed_assets_liquidation']):
"""
:name: 固定资产合计(MRQ)
:desc: 固定资产合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_fixed_assets_liquidation': 'TotalFixedAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalLib(tp_derivation, factor_derivation, dependencies=['total_liability']):
"""
:name: 负债合计(MRQ)
:desc: 负债合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_liability': 'TotalLib'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def ShEquity(tp_derivation, factor_derivation, dependencies=['total_owner_equities']):
"""
:name: 股东权益(MRQ)
:desc: 股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_owner_equities': 'ShEquity'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def CashAndCashEqu(tp_derivation, factor_derivation, dependencies=['cash_and_equivalents_at_end']):
"""
:name: 期末现金及现金等价物(MRQ)
:desc: 期末现金及现金等价物(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'cash_and_equivalents_at_end': 'CashAndCashEqu'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue']):
"""
:name: 营业总收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_revenue': 'SalesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalOptCostTTM(tp_derivation, factor_derivation, dependencies=['total_operating_cost']):
"""
:name: 营业总成本(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总成本”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_cost': 'TotalOptCostTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptIncTTM(tp_derivation, factor_derivation, dependencies=['operating_revenue']):
"""
:name: 营业收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_revenue': 'OptIncTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def GrossMarginTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 毛利(TTM) 营业毛利润
:desc: 根据截止指定日已披露的最新报告期“毛利”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: (x[0] - x[1]) / x[1] if x[1] != 0 and x[1] is not None else None
management['GrossMarginTTM'] = management[dependencies].apply(func, axis=1)
management = management[['GrossMarginTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesExpensesTTM(tp_derivation, factor_derivation, dependencies=['sale_expense']):
"""
:name: 销售费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“销售费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
        management = management.rename(columns={'sale_expense': 'SalesExpensesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AdmFeeTTM(tp_derivation, factor_derivation, dependencies=['administration_expense']):
"""
:name: 管理费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“管理费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'administration_expense': 'AdmFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FinFeeTTM(tp_derivation, factor_derivation, dependencies=['financial_expense']):
"""
:name: 财务费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“财务费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'financial_expense': 'FinFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def PerFeeTTM(tp_derivation, factor_derivation, dependencies=['sale_expense',
'administration_expense',
'financial_expense',
]):
"""
:name: 期间费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“期间费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['PerFeeTTM'] = management[dependencies].apply(func, axis=1)
management = management[['PerFeeTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestExpTTM(tp_derivation, factor_derivation, dependencies=['interest_expense']):
"""
:name: 利息支出(TTM)
:desc: 根据截止指定日已披露的最新报告期“利息支出”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'interest_expense': 'InterestExpTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def MinorInterestTTM(tp_derivation, factor_derivation, dependencies=['minority_profit']):
"""
:name: 少数股东损益(TTM)
:desc: 根据截止指定日已披露的最新报告期“少数股东损益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'minority_profit': 'MinorInterestTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AssetImpLossTTM(tp_derivation, factor_derivation, dependencies=['asset_impairment_loss']):
"""
:name: 资产减值损失(TTM)
:desc: 根据截止指定日已披露的最新报告期“资产减值损失”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'asset_impairment_loss': 'AssetImpLossTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromOptActTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 经营活动净收益(TTM)
:desc: 根据截止指定日已披露的最新报告期“经营活动净收益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetIncFromOptActTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromOptActTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromValueChgTTM(tp_derivation, factor_derivation, dependencies=['fair_value_variable_income',
'investment_income',
'exchange_income',
]):
"""
        :name: Net income from value changes (TTM)
        :desc: Net gain from fair-value changes + net investment income + net exchange gain
        :unit: CNY
        :view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['NetIncFromValueChgTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromValueChgTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptProfitTTM(tp_derivation, factor_derivation, dependencies=['operating_profit']):
"""
:name: 营业利润(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业利润”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_profit': 'OptProfitTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetNonOptIncAndExpTTM(tp_derivation, factor_derivation, dependencies=['non_operating_revenue',
'non_operating_expense', ]):
"""
:name: 营业外收支净额(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业外收支净额”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1]
management['NetNonOptIncAndExpTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetNonOptIncAndExpTTM']]
factor_derivation = | pd.merge(factor_derivation, management, how='outer', on="security_code") | pandas.merge |
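# --- Added usage sketch (not in the original source) --------------------------
# One plausible way the methods above chain: each call merges one more column
# into ``factor_derivation``. The driver below is illustrative only;
# ``tp_derivation`` is assumed to be a statement frame indexed by security_code.
#
#     calc = FactorBasicDerivation()
#     factors = pd.DataFrame(index=tp_derivation.index)
#     factors = calc.EBIT(tp_derivation, factors)
#     factors = calc.EBITDA(tp_derivation, factors)
#     factors = calc.DepAndAmo(tp_derivation, factors)
#     factors = calc.FCFF(tp_derivation, factors)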
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 01:55:22 2020
@author: balajiramesh
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 00:25:12 2020
@author: balajiramesh
Raw: 16,319,230 and 2,641,562
Within study timeline: 14,393,806 and 2,247,749
Within study area and timeline: 7,892,752 and 1,246,896
After removing washout period: 7,816,138 and 1,233,913
After removing missing data: 7,813,866 and 1,233,600 OP and IP ED visit records
"""
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
from datetime import timedelta, date,datetime
from dateutil import parser
import glob
import sys
sys.path.insert(1, r'Z:\Balaji\GRAScripts\dhs_scripts')
from recalculate_svi import recalculateSVI
#%%functions
def filter_mortality(df):
pat_sta=df.PAT_STATUS.copy()
pat_sta=pd.to_numeric(pat_sta,errors="coerce")
return pat_sta.isin([20,40,41,42]).astype('int') #status code for died
def get_sp_outcomes(sp,Dis_cat):
global sp_outcomes
return sp.merge(sp_outcomes.loc[:,['RECORD_ID','op',Dis_cat]],on=['RECORD_ID','op'],how='left')[Dis_cat].values
#%%read ip op data
INPUT_IPOP_DIR=r'Z:\Balaji\DSHS ED visit data(PII)\CleanedMergedJoined'
#read_op
op=pd.read_pickle(INPUT_IPOP_DIR+'\\op')
op=op.loc[:,['RECORD_ID','STMT_PERIOD_FROM','PAT_ADDR_CENSUS_BLOCK_GROUP','PAT_AGE_YEARS','SEX_CODE','RACE','PAT_STATUS','ETHNICITY','PAT_ZIP','LCODE']]
op['op']=True
#sp=pd.read_pickle(INPUT_IPOP_DIR+r'\op')
#read_ip
ip=pd.read_pickle(INPUT_IPOP_DIR+'\\ip')
ip=ip.loc[:,['RECORD_ID','STMT_PERIOD_FROM','PAT_ADDR_CENSUS_BLOCK_GROUP','PAT_AGE_YEARS','SEX_CODE','RACE','PAT_STATUS','ETHNICITY','PAT_ZIP','LCODE']]
ip['op']=False
#merge Ip and OP
op=pd.concat([op,ip])
sp=op
del op,ip
#read op/ip outcomes df
sp_outcomes=pd.read_csv(INPUT_IPOP_DIR+'\\ip_op_outcomes.csv')
#read flood ratio data
flood_data=pd.read_csv(r'Z:/Balaji/indundation_harvey/FloodRatioJoinedAll_v1/FloodInund_AllJoined_v1.csv')
#read svi data
SVI_df_raw=pd.read_csv(r'Z:/Balaji/SVI_Raw/TEXAS.csv')
SVI_df_raw.FIPS=pd.to_numeric(SVI_df_raw.FIPS)
#read population data
demos=pd.read_csv(r'Z:/Balaji/Census_data_texas/population/ACS_17_5YR_DP05_with_ann.csv',low_memory=False,skiprows=1)
demos.Id2=demos.Id2.astype("Int64")
#read study area counties
#county_to_filter=pd.read_csv('Z:/Balaji/counties_evacu_order.csv').GEOID.to_list()
county_to_filter=pd.read_csv('Z:\Balaji\DSHS ED visit data(PII)\contiesInStudyArea.csv').County_FIPS.to_list()
#%%read the categories file
outcome_cats=pd.read_csv('Z:/Balaji/GRAScripts/dhs_scripts/categories.csv')
outcome_cats.fillna('',inplace=True)
#%%predefine variable
flood_cats_in=1
floodr_use="DFO_R200" #['DFO_R200','DFO_R100','LIST_R20','DFO_R20','DFOuLIST_R20']
nullAsZero="True" #null flood ratios are changed to 0
floodZeroSep="True" # zeros are considered as seperate class
flood_data_zip=None
interv_dates=[20170825, 20170913, 20171014,20180701,20181001] #lower bound excluded
washout_period=[20170819,20170825] #including the dates specified
interv_dates_cats=['flood','PostFlood1','PostFlood2','NextYear1','NextYear2']
Dis_cat="ALL"
#%%cleaning for age, gender and race and create census tract
#age
sp.loc[:,'PAT_AGE_YEARS']=pd.to_numeric(sp.PAT_AGE_YEARS,errors="coerce")
sp.loc[:,'PAT_AGE_YEARS']=sp.loc[:,'PAT_AGE_YEARS'].astype('float')
#bin ages
#sp.loc[:,'PAT_AGE_YEARS']=pd.cut(sp.PAT_AGE_YEARS,bins=[0,1,4,11,16,25,64,150],include_lowest=True,labels=(0,1,4,11,16,25,64))
#gender
sp.loc[~sp.SEX_CODE.isin(["M","F"]),'SEX_CODE']=np.nan
sp.SEX_CODE=sp.SEX_CODE.astype('category').cat.reorder_categories(['M','F'],ordered=False)
#ethnicity
sp.loc[:,'ETHNICITY']= | pd.to_numeric(sp.ETHNICITY,errors="coerce") | pandas.to_numeric |
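# --- Added illustrative sketch (not part of the original script) --------------
# errors="coerce", as used above, turns unparseable codes into NaN rather than
# raising. The example values are hypothetical.
_demo_eth = pd.Series(["1", "2", "unknown", None])
print(pd.to_numeric(_demo_eth, errors="coerce"))  # 1.0, 2.0, NaN, NaN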
import streamlit as st
import mysql.connector
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from fbprophet.plot import plot_components_plotly
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from plotly import graph_objs as go
import pandas as pd
import numpy as np
#Page expands to full width
st.set_page_config(layout="wide")
st.title("Predictive Analytics")
#About
expander_bar = st.expander("About")
expander_bar.markdown("""
**Context:** Time-series forecasting using *Prophet* model to ...
""")
product = ("All Product","Big Buko Pie / Box","Mini Buko Pie Box","Mini Buko Pie Piece","Macaroons","Macapuno Balls","Coffee",
"Buko Juice 1L Bottle","Buko Shake 1L Bottle","Macapuno Shake 1L Bottle","Buko Juice 12oz Cup","Buko Juice 16oz Cup",
"Buko Juice 22oz Cup","Buko Shake 12oz Cup","Buko Shake 16oz Cup","Buko Shake 22oz Cup","Hot Choco","Macapuno Shake 12oz Cup",
"Macapuno Shake 16oz Cup","Macapuno Shake 22oz Cup","Buko Juice 350ml Bottle","Buko Shake 350ml Bottle",
"Buko Shake 500ml Bottle","Macapuno Shake 350ml Bottle","Macapuno Shake 500ml Bottle")
selected_product = st.selectbox("Select product for prediction:",product)
n_days = st.slider('Days of prediction:', 1, 7)
connection = mysql.connector.connect(host = 'sql6.freesqldatabase.com',user = 'sql6450411', passwd = '<PASSWORD>', db = 'sql6450411')
if selected_product == "All Product":
sales = | pd.read_sql_query("SELECT * FROM sales_order WHERE date >= '2021-03-01 00:00:00'", connection) | pandas.read_sql_query |
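# --- Added hedged sketch (not part of the original app) -----------------------
# Prophet expects a two-column frame named ds/y. One plausible way to shape the
# ``sales`` query above for it; the ``date`` and ``quantity`` column names are
# assumptions about the table, not facts from the original code.
#
#     daily = (sales.assign(ds=pd.to_datetime(sales["date"]))
#                   .groupby(pd.Grouper(key="ds", freq="D"))["quantity"]
#                   .sum().reset_index(name="y"))
#     m = Prophet()
#     m.fit(daily)
#     future = m.make_future_dataframe(periods=n_days)
#     forecast = m.predict(future)
#     st.plotly_chart(plot_plotly(m, forecast))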
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import needs_i8_conversion
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_value_counts(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.value_counts()
counter = collections.Counter(obj)
expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated().any():
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_null(null_obj, index_or_series_obj):
orig = index_or_series_obj
obj = orig.copy()
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(orig, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_value_counts_inferred(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
msg = "bins argument only works with numeric data"
with pytest.raises(TypeError, match=msg):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({}) if klass is dict else klass({}, dtype=object)
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
def test_value_counts_datetime64(index_or_series):
klass = index_or_series
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT] * 4)
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s = pd.concat([Series([4], index=DatetimeIndex([pd.NaT])), expected_s])
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = | TimedeltaIndex(["1 days"], name="dt") | pandas.TimedeltaIndex |
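# --- Added illustrative note (not part of the test module) --------------------
# The expectation above in isolation: six identical one-day timedeltas collapse
# into a single value_counts bucket keyed by Timedelta("1 days"):
#
#     s = Series(pd.to_timedelta(["1 days"] * 6), name="dt")
#     s.value_counts()     # Timedelta("1 days") -> 6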
import argparse
import copy
import cPickle
import matplotlib.pyplot as plt
import ntpath
import numpy as np
import pandas as pd
import pylab
#from pylab import plot, show, savefig, xlim, figure, hold, ylim, legend, boxplot, setp, axes, xlabel, ylabel
import scipy
import time
import sys, os, re
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from context import diana
import diana.classes.drug as diana_drug
import diana.classes.analysis as diana_analysis
def main():
options = parse_user_arguments()
analysis_results(options)
def parse_user_arguments(*args, **kwds):
"""
Parses the arguments of the program
"""
parser = argparse.ArgumentParser(
description = "Generate the profiles of the input drug",
epilog = "@oliva's lab 2017")
parser.add_argument('-cr','--crossings_file',dest='crossings_file',action = 'store',
help = """Define the file where the drug crossings to be explored have been written""")
parser.add_argument('-sif','--sif_file',dest='sif',action = 'store',
help = """" Input file with the protein-protein interaction network in SIF format that will be used in the experiment. """)
parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',
help = """List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:
- Different numbers that will be the threshold values separated by newline characters.
For example, a file called "top_threshold.list" containing:
0.1
0.5
1
5
10
""")
parser.add_argument('-se','--consider_se',dest='consider_se',action = 'store_true',
help = """" Consider Side Effects / ATCs. """)
parser.add_argument('-pca','--pca',dest='pca',action = 'store_true',
help = """" Make a PCA to reduce dimensionality. """)
parser.add_argument('-cp','--comparison',dest='comparison_other_methods',action = 'store_true',
help = """" If we are considering a dataset to compare with other methods. """)
parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),
help = """Define the workspace directory where the data directory and the results directory will be created""")
options=parser.parse_args()
return options
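# --- Added usage note (not part of the original script) -----------------------
# A typical invocation, with placeholder file names (illustrative only, not
# taken from the project):
#
#   python analyze_results.py -cr crossings.txt -sif network.sif \
#       -th top_threshold.list -se -ws /path/to/workspace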
#################
#################
# MAIN FUNCTION #
#################
#################
def analysis_results(options):
"""
Analyzes the results of the comparisons
"""
# Start marker for time measure
start = time.time()
print("\n\t\t----------------------------------------------------------------------------------------------------------------------------\n")
print("\t\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. Analysis of results: Selection of classifier\n")
print("\t\t----------------------------------------------------------------------------------------------------------------------------\n")
# Get the script path
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
toolbox_dir = os.path.join(main_path, 'diana/toolbox')
# Check the directory of the profiles and comparisons
data_dir = os.path.join(options.workspace, "profiles")
check_directory(data_dir)
results_dir = os.path.join(options.workspace, "comparisons")
check_directory(results_dir)
# Create a directory for the analysis inside the workspace
analysis_dir = os.path.join(options.workspace, "analysis")
create_directory(analysis_dir)
# Create a directory for the analysis of the comparison with other methods
if options.comparison_other_methods:
analysis_dir = os.path.join(options.workspace, "analysis_comparison")
create_directory(analysis_dir)
# Get the list of thresholds to create the profiles
if options.threshold_list and fileExist(options.threshold_list):
threshold_list = get_values_from_threshold_file(options.threshold_list)
else:
threshold_list = [1, 5, 10, 20, 50]
# Do we consider Side Effects/ATC?
if options.consider_se:
consider_se = True
else:
consider_se = False
# Get the names of the columns
columns = diana_analysis.obtain_columns(threshold_list, ATC_SE=consider_se)
#-----------------------------------------------------#
# PARSE THE RESULTS AND CREATE A PANDAS DATAFRAME #
#-----------------------------------------------------#
pair2comb_file = os.path.join(toolbox_dir, 'pair2comb.pcl')
pair2comb = cPickle.load(open(pair2comb_file))
ddi = sum(1 for x in pair2comb.values() if x == 1)
non_ddi = sum(1 for x in pair2comb.values() if x == 0)
print('NUMBER OF DRUG COMBINATIONS:\t\t{}\n'.format(ddi))
print('NUMBER OF NON-DRUG COMBINATIONS:\t{}\n'.format(non_ddi))
output_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons.csv')
# Change the name of the output file if we are doing a comparison with other methods
if options.comparison_other_methods:
output_dataframe = os.path.join(analysis_dir, 'comparison_other_methods.csv')
if not fileExist(output_dataframe):
# Create a data frame to store the results
df = pd.DataFrame(columns=columns)
# Prepare files
network_filename = ntpath.basename(options.sif)
drugbank2targets_file = os.path.join(toolbox_dir, 'drugbank_to_targets.pcl')
drug2targets = cPickle.load(open(drugbank2targets_file))
# Open the crossings file
crossings_file = options.crossings_file
with open(crossings_file, 'r') as crossings_file_fd:
for line in crossings_file_fd:
crossing = line.strip()
drug1, drug2 = crossing.split('---')
# Get drug IDs
targets1 = list(drug2targets[drug1.upper()])
drug_id1 = diana_drug.generate_drug_id(drug1, targets1, network_filename)
targets2 = list(drug2targets[drug2.upper()])
drug_id2 = diana_drug.generate_drug_id(drug2, targets2, network_filename)
# Check results table
comparison = '{}---{}'.format(drug_id1, drug_id2)
comparison_dir = os.path.join(results_dir, comparison)
results_table = os.path.join(comparison_dir, 'results_table.tsv')
if not fileExist(results_table):
print('The comparison of {} ({}) and {} ({}) has not been executed properly!\n'.format(drug1, drug_id1, drug2, drug_id2))
sys.exit(10)
if crossing in pair2comb:
combination_field = pair2comb[crossing]
else:
print('The comparison {} is not in the pair2comb dictionary!\n'.format(crossing))
sys.exit(10)
results = diana_analysis.get_results_from_table(results_table, columns, combination_field)
df2 = pd.DataFrame([results], columns=columns, index=[comparison])
# Add the information to the main data frame
df = df.append(df2)
# Output the Pandas dataframe in a CSV file
df.to_csv(output_dataframe)
else:
df = pd.read_csv(output_dataframe, index_col=0)
#---------------------------#
# REMOVE MISSING VALUES #
#---------------------------#
# Replace the None values in dcstructure by nan
if 'None' in df['dcstructure']:
df = df.replace(to_replace={'dcstructure':{'None':np.nan}})
# Remove the nan values in dcstructure
df = df.dropna()
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing missing values:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing missing values:\t{}\n'.format(num_ndc))
#---------------------------#
# IDENTIFY ME-TOO DRUGS #
#---------------------------#
me_too_dir = os.path.join(analysis_dir, 'me_too_drugs')
create_directory(me_too_dir)
me_too_drugs_table = os.path.join(me_too_dir, 'me_too_drugs.tsv')
me_too_drug_combs_table = os.path.join(me_too_dir, 'me_too_drug_combinations.tsv')
me_too_drug_pairs_file = os.path.join(me_too_dir, 'me_too_drug_pairs.pcl')
me_too_drug_comb_pairs_file = os.path.join(me_too_dir, 'me_too_drug_comb_pairs.pcl')
if not fileExist(me_too_drug_pairs_file) or not fileExist(me_too_drug_comb_pairs_file):
df_struc = df[['dcstructure']]
df_struc = df_struc.astype(float)
me_too_drug_pairs, me_too_drug_comb_pairs = diana_analysis.obtain_me_too_drugs_and_combinations(df_struc, columns, me_too_drugs_table, me_too_drug_combs_table)
cPickle.dump(me_too_drug_pairs, open(me_too_drug_pairs_file, 'w'))
cPickle.dump(me_too_drug_comb_pairs, open(me_too_drug_comb_pairs_file, 'w'))
else:
me_too_drug_pairs = cPickle.load(open(me_too_drug_pairs_file))
me_too_drug_comb_pairs = cPickle.load(open(me_too_drug_comb_pairs_file))
# Process me-too drug combination pairs
me_too_drug_combinations = set()
drug_pair_to_me_too_times = {}
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
me_too_drug_combinations.add(frozenset([drug_comb1, drug_comb2]))
drug_pair_to_me_too_times.setdefault(drug_comb1, 0)
drug_pair_to_me_too_times.setdefault(drug_comb2, 0)
drug_pair_to_me_too_times[drug_comb1] += 1
drug_pair_to_me_too_times[drug_comb2] += 1
removed_drug_pairs = set()
for pair in me_too_drug_comb_pairs:
drug_comb1, drug_comb2 = pair.split('___')
if drug_comb1 in removed_drug_pairs or drug_comb2 in removed_drug_pairs:
continue
if drug_pair_to_me_too_times[drug_comb1] > drug_pair_to_me_too_times[drug_comb2]:
removed_drug_pairs.add(drug_comb1)
else:
removed_drug_pairs.add(drug_comb2)
# Remove the drug pairs which appear in me-too pairs of drug pairs more times
df = df.loc[~df.index.isin(list(removed_drug_pairs))]
# Count the number of drug combinations / non-drug combinations
dc_data = df[df['combination'] == 1]
ndc_data = df[df['combination'] == 0]
num_dc = len(dc_data.index)
num_ndc = len(ndc_data.index)
print('Number of drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_dc))
print('Number of non-drug combinations after removing me-too conflictive drug pairs:\t{}\n'.format(num_ndc))
#-----------------------------------------------------------#
# DIVIDE THE DATASET IN A TRAINING AND A VALIDATION SET #
#-----------------------------------------------------------#
training_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons_training.csv')
validation_dataframe = os.path.join(analysis_dir, 'dcdb_comparisons_validation.csv')
proportion_training = 0.8
# Change the name of the output file if we are doing a comparison with other methods
if options.comparison_other_methods:
training_dataframe = os.path.join(analysis_dir, 'comparison_other_methods_training.csv')
validation_dataframe = os.path.join(analysis_dir, 'comparison_other_methods_validation.csv')
if not fileExist(training_dataframe) or not fileExist(validation_dataframe):
num_dc_training = int(round(num_dc*proportion_training))
num_ndc_training = int(round(num_ndc*proportion_training))
print('Training set (positives): {} out of {} ({}%)\n'.format(num_dc_training, num_dc, proportion_training*100))
print('Training set (negatives): {} out of {} ({}%)\n'.format(num_ndc_training, num_ndc, proportion_training*100))
dc_data_training = dc_data.sample(n=num_dc_training) # Get a random sample
ndc_data_training = ndc_data.sample(n=num_ndc_training)
dc_data_validation = dc_data.loc[~dc_data.index.isin(dc_data_training.index)] # Remove the sample that we have taken from the dataframe
ndc_data_validation = ndc_data.loc[~ndc_data.index.isin(ndc_data_training.index)]
df_training = pd.concat([dc_data_training, ndc_data_training])
df_validation = pd.concat([dc_data_validation, ndc_data_validation])
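# Editor's sketch (not in the original snippet, which ends at the concat above):
# given the fileExist() guard on training_dataframe/validation_dataframe, a
# plausible continuation persists the split and reloads it when the CSVs
# already exist, e.g.:
#
#     df_training.to_csv(training_dataframe)
#     df_validation.to_csv(validation_dataframe)
#     # ...and in the else branch: pd.read_csv(<file>, index_col=0)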
# Core Pkg
import streamlit as st
import streamlit.components.v1 as stc
# EDA Pkgs
import pandas as pd
# Data Vis Pkgs
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("Agg")
# Opening Files/Forensic MetaData Extraction
# For Images
from PIL import Image
import exifread
import os
from datetime import datetime
import base64
# For Audio
import mutagen
# For PDF
from PyPDF2 import PdfFileReader
# HTML
metadata_wiki = """
Metadata is data that provides information about one or more aspects of other data; it summarizes basic information about data, which can make tracking and working with specific data easier.
"""
HTML_BANNER = """
<div style="background-color:#464e5f;padding:10px;border-radius:10px">
<h1 style="color:white;text-align:center;">MetaData Extractor App </h1>
</div>
"""
# Functions
@st.cache
def load_image(image_file):
img = Image.open(image_file)
return img
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
# Fxn to Download
def make_downloadable(data):
csvfile = data.to_csv(index=False)
b64 = base64.b64encode(csvfile.encode()).decode() # B64 encoding
st.markdown("### ** Download CSV File ** ")
new_filename = "metadata_result_{}.csv".format(timestr)
href = f'<a href="data:file/csv;base64,{b64}" download="{new_filename}">Click Here!</a>'
st.markdown(href, unsafe_allow_html=True)
# DB Managment
from db_fxns import *
# Utils
from app_utils import *
# App Structure
def main():
"""Meta Data Extraction App"""
# st.title("MetaData Extraction App")
stc.html(HTML_BANNER)
menu = ["Home", "Image", "Audio", "DocumentFiles", "Analytics", "About"]
choice = st.sidebar.selectbox("Menu", menu)
create_uploaded_filetable()
if choice == "Home":
st.subheader("Home")
# Image
st.image(load_image("images/metadataextraction_app_jcharistech.png"))
# Description
st.write(metadata_wiki)
# Expanders & Columns
col1, col2, col3 = st.beta_columns(3)
with col1:
with st.beta_expander("Get Image Metadata 📷"):
st.info("Image Metadata")
st.markdown("📷")
st.text("Upload JPEG,JPG,PNG Images")
with col2:
with st.beta_expander("Get Audio Metadata 🔉"):
st.info("Audio Metadata")
st.markdown("🔉")
st.text("Upload Mp3,Ogg")
with col3:
with st.beta_expander("Get Document Metadata 📄📁"):
st.info("Document Files Metadata")
st.markdown("📄📁")
st.text("Upload PDF,Docx")
elif choice == "Image":
st.subheader("Image MetaData Extraction")
image_file = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg"])
if image_file is not None:
# UploadFile Class is File-Like Binary Byte
# st.write(type(image_file))
# st.write(dir(image_file))
with st.beta_expander("File Stats"):
file_details = {
"FileName": image_file.name,
"FileSize": image_file.size,
"FileType": image_file.type,
}
st.write(file_details)
statinfo = os.stat(image_file.readable())
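# Editor's note (comment only, not in the original app): readable() returns a
# bool, so os.stat() above is effectively called on file descriptor 1 rather
# than on the uploaded image; the times reported below describe that
# descriptor, not the file the user uploaded.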
st.write(statinfo)
stats_details = {
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
st.write(stats_details)
# Combine All Details
file_details_combined = {
"FileName": image_file.name,
"FileSize": image_file.size,
"FileType": image_file.type,
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
# Convert to DataFrame
df_file_details = pd.DataFrame(
list(file_details_combined.items()), columns=["Meta Tags", "Value"]
)
st.dataframe(df_file_details)
# Track Details
add_file_details(
image_file.name, image_file.type, image_file.size, datetime.now()
)
# Layouts
c1, c2 = st.beta_columns(2)
with c1:
with st.beta_expander("View Image"):
img = load_image(image_file)
st.image(img, width=250)  # st.image has no height parameter
with c2:
with st.beta_expander("Default(JPEG)"):
st.info("Using PILLOW")
img = load_image(image_file)
# st.write(dir(img))
img_details = {
"format": img.format,
"format_desc": img.format_description,
"filename": img.filename,
"size": img.size,
"height": img.height,
"width": img.width,
"info": img.info,
}
# st.write(img_details)
df_img_details_default = pd.DataFrame(
list(img_details.items()), columns=["Meta Tags", "Value"]
)
st.dataframe(df_img_details_default)
# Layouts For Forensic
fcol1, fcol2 = st.beta_columns(2)
with fcol1:
with st.beta_expander("Exifread Tool"):
meta_tags = exifread.process_file(image_file)
# st.write(meta_tags)
df_img_details_exifread = pd.DataFrame(
list(meta_tags.items()), columns=["Meta Tags", "Value"]
)
st.dataframe(df_img_details_exifread)
with fcol2:
with st.beta_expander("Image Geo-Coordinates"):
img_details_with_exif = get_exif(image_file)
try:
gpg_info = img_details_with_exif
except:
gpg_info = "None Found"
# st.write(gpg_info)
img_coordinates = get_decimal_coordinates(gpg_info)
st.write(img_coordinates)
with st.beta_expander("Download Results"):
final_df = pd.concat(
[df_file_details, df_img_details_default, df_img_details_exifread]
)
st.dataframe(final_df)
make_downloadable(final_df)
elif choice == "Audio":
st.subheader("Audio MetaData Extraction")
# File Upload
audio_file = st.file_uploader("Upload Audio", type=["mp3", "ogg"])
if audio_file is not None:
# Layouts
col1, col2 = st.beta_columns(2)
with col1:
st.audio(audio_file.read())
with col2:
with st.beta_expander("File Stats"):
file_details = {
"FileName": audio_file.name,
"FileSize": audio_file.size,
"FileType": audio_file.type,
}
st.write(file_details)
statinfo = os.stat(audio_file.readable())
# st.write(statinfo)
stats_details = {
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
st.write(stats_details)
# Combine All Details
file_details_combined = {
"FileName": audio_file.name,
"FileSize": audio_file.size,
"FileType": audio_file.type,
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
# Convert to DataFrame
df_file_details = pd.DataFrame(
list(file_details_combined.items()),
columns=["Meta Tags", "Value"],
)
st.dataframe(df_file_details)
# Track Details
add_file_details(
audio_file.name,
audio_file.type,
audio_file.size,
datetime.now(),
)
# Extraction Process using mutagen
with st.beta_expander("Metadata with Mutagen"):
meta_tags = mutagen.File(audio_file)
# st.write(meta_tags)
df_audio_details_with_mutagen = pd.DataFrame(
list(meta_tags.items()), columns=["Meta Tags", "Value"]
)
st.dataframe(df_audio_details_with_mutagen)
with st.beta_expander("Download Results"):
final_df = pd.concat([df_file_details, df_audio_details_with_mutagen])
st.dataframe(final_df)
make_downloadable(final_df)
elif choice == "DocumentFiles":
st.subheader("DocumentFiles MetaData Extraction")
# FIle Upload
text_file = st.file_uploader("Upload File", type=["PDF"])
if text_file is not None:
dcol1, dcol2 = st.beta_columns([1, 2])
with dcol1:
with st.beta_expander("File Stats"):
file_details = {
"FileName": text_file.name,
"FileSize": text_file.size,
"FileType": text_file.type,
}
st.write(file_details)
statinfo = os.stat(text_file.readable())
stats_details = {
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
st.write(stats_details)
# Combine All Details
file_details_combined = {
"FileName": text_file.name,
"FileSize": text_file.size,
"FileType": text_file.type,
"Accessed_Time": get_readable_time(statinfo.st_atime),
"Creation_Time": get_readable_time(statinfo.st_ctime),
"Modified_Time": get_readable_time(statinfo.st_mtime),
}
# Convert to DataFrame
df_file_details = pd.DataFrame(
list(file_details_combined.items()),
columns=["Meta Tags", "Value"],
)
# st.dataframe(df_file_details)
# Track Details
add_file_details(
text_file.name, text_file.type, text_file.size, datetime.now()
)
# Extraction Process
with dcol2:
with st.beta_expander("Metadata"):
pdf_file = PdfFileReader(text_file)
pdf_info = pdf_file.getDocumentInfo()
# st.write(pdf_info)
# Convert to DataFrame
df_file_details_with_pdf = pd.DataFrame(
list(pdf_info.items()), columns=["Meta Tags", "Value"]
)
st.dataframe(df_file_details_with_pdf)
# Download
with st.beta_expander("Download Results"):
final_df = pd.concat([df_file_details, df_file_details_with_pdf])
import numpy as np
import pandas as pd
returns = prices.pct_change()
returns = returns.dropna()
returns.std()
deviations = returns - returns.mean()
squared_deviations = deviations ** 2
variance = squared_deviations.mean()
volatility = np.sqrt(variance)
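# Editor's sketch (not in the original notebook): if the returns above are
# monthly, the volatility is commonly annualized by scaling with sqrt(12):
annualized_vol = volatility * np.sqrt(12)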
me_m = pd.read_csv('./Data/Portfolios_Formed_on_ME_monthly_EW.csv',
header=0, index_col=0, parse_dates=True, na_values=-99.99)
rets = me_m[['Lo 10', 'Hi 10']]
rets.columns = ['SmallCap', 'LargeCap']
rets = rets / 100
rets.plot.line()
rets.head()
rets.index = pd.to_datetime(rets.index, format='%Y%m')
rets.head()
rets.index = rets.index.to_period('M')
rets['1975']
wealth_index = 1000 * (1+rets['LargeCap']).cumprod()
wealth_index.plot.line()
previous_peaks = wealth_index.cummax()
previous_peaks.plot.line()
drawdown = (wealth_index - previous_peaks) / previous_peaks
drawdown.plot()
drawdown.min()
drawdown['1975':].min()
drawdown['1975':].idxmin()
def drawdown(return_series: pd.Series):
"""
Takes a time series of asset returns
Computes and returns a DataFrame that contains:
the wealth index
the previous peaks
percent drawdowns
:param return_series:
:return:
"""
wealth_index = 1000 * (1+return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks) / previous_peaks
return pd.DataFrame(
{
"Wealth": wealth_index,
"Peaks": previous_peaks,
"Drawdown": drawdowns
}
)
drawdown(rets['LargeCap']).head()
drawdown(rets['LargeCap'])[['Wealth', 'Peaks']].plot()
import pandas as pd
import EDHEC.edhec_risk_kit as erk
hfi = erk.get_hfi_returns()
hfi.head()
pd.concat([hfi.mean(), hfi.median(), hfi.mean()>hfi.median()], axis='columns')
erk.skewness(hfi).sort_values()
import scipy.stats
scipy.stats.skew(hfi)
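# Editor's sketch (not in the original notebook): erk.skewness is the third
# standardized moment; an equivalent manual computation, assuming the
# population (ddof=0) standard deviation, is:
demeaned = hfi - hfi.mean()
manual_skew = (demeaned**3).mean() / hfi.std(ddof=0)**3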
import numpy as np
normal_rets = np.random.normal(0, .15, size=(263, 1))
erk.skewness(normal_rets)
erk.kurtosis(normal_rets)
erk.kurtosis(hfi)
scipy.stats.kurtosis(normal_rets)
scipy.stats.jarque_bera(normal_rets)
scipy.stats.jarque_bera(hfi)
erk.is_normal(normal_rets)
hfi.aggregate(erk.is_normal)
ffme = erk.get_ffme_returns()
erk.skewness(ffme)
erk.kurtosis(ffme)
hfi.std(ddof=0)
hfi[hfi<0].std(ddof=0)
erk.semideviation(hfi)
# Historical VaR
# Parametric VaR - Gaussian
# Modified Cornish-Fisher VaR
np.percentile(hfi, q=5, axis=0)
hfi.apply(lambda x: np.percentile(x, q=5, axis=0))
erk.var_historic(hfi)
from scipy.stats import norm
z = norm.ppf(.05)
hfi.mean() + z*hfi.std(ddof=0)
erk.var_gaussian(hfi)
var_list = [erk.var_gaussian(hfi), erk.var_gaussian(hfi, modified=True), erk.var_historic(hfi)]
comparison = pd.concat(var_list, axis=1)
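# Editor's sketch (not in the original notebook): the modified=True variant
# above typically adjusts the Gaussian quantile with the Cornish-Fisher
# expansion; using the z defined earlier and non-excess kurtosis k, roughly:
s = erk.skewness(hfi)
k = erk.kurtosis(hfi)
z_cf = (z + (z**2 - 1) * s / 6
        + (z**3 - 3 * z) * (k - 3) / 24
        - (2 * z**3 - 5 * z) * s**2 / 36)
var_cornish_fisher = -(hfi.mean() + z_cf * hfi.std(ddof=0))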
import pandas as pd
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import random
from sklearn import svm
from keras.optimizers import Adam
from keras.layers import LeakyReLU
from nltk.stem import WordNetLemmatizer
import operator
from textblob import TextBlob
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
import re
from wordcloud import WordCloud
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
class MBTI():
def __init__(self):
self.csv_path = "mbti_1.csv"
self.df = pd.read_csv(self.csv_path)
self.original_df = self.df.copy()
self.porter = PorterStemmer()
self.lancaster = LancasterStemmer()
self.lemmatizer = WordNetLemmatizer()
self.all_words = {}
def store_clean_df(self):
self.df.to_csv('clean.csv')
def load_clean_df(self):
self.df = pd.read_csv('clean.csv')
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This module contains the class PandasDataframe.
PandasDataframe is a parent abstract class for any dataframe class
for the pandas storage format.
"""
from collections import OrderedDict
import numpy as np
import pandas
import datetime
from pandas.core.indexes.api import ensure_index, Index, RangeIndex
from pandas.core.dtypes.common import is_numeric_dtype, is_list_like
from pandas._libs.lib import no_default
from typing import List, Hashable, Optional, Callable, Union, Dict
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.parsers import (
find_common_type_cat as find_common_type,
)
from modin.core.dataframe.base.dataframe.dataframe import ModinDataframe
from modin.core.dataframe.base.dataframe.utils import (
Axis,
JoinType,
)
from modin.pandas.indexing import is_range_like
from modin.pandas.utils import is_full_grab_slice, check_both_not_none
from modin.logging import LoggerMetaClass
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
"""
Lazily propagate metadata for the ``PandasDataframe``.
This decorator first adds the minimum required reindexing operations
to each partition's queue of functions to be lazily applied for
each PandasDataframe in the arguments by applying the function
run_f_on_minimally_updated_metadata. The decorator also sets the
flags for deferred metadata synchronization on the function result
if necessary.
Parameters
----------
apply_axis : str, default: None
The axes on which to apply the reindexing operations to the `self._partitions` lazily.
Case None: No lazy metadata propagation.
Case "both": Add reindexing operations on both axes to partition queue.
Case "opposite": Add reindexing operations complementary to given axis.
Case "rows": Add reindexing operations on row axis to partition queue.
axis_arg : int, default: -1
The index or column axis.
transpose : bool, default: False
Boolean for if a transpose operation is being used.
Returns
-------
The wrapped function.
"""
def decorator(f):
from functools import wraps
@wraps(f)
def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
for obj in (
[self]
+ [o for o in args if isinstance(o, PandasDataframe)]
+ [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
+ [
d
for o in args
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
+ [
d
for _, o in kwargs.items()
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
):
if apply_axis == "both":
if obj._deferred_index and obj._deferred_column:
obj._propagate_index_objs(axis=None)
elif obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif apply_axis == "opposite":
if "axis" not in kwargs:
axis = args[axis_arg]
else:
axis = kwargs["axis"]
if axis == 0 and obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif axis == 1 and obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif apply_axis == "rows":
obj._propagate_index_objs(axis=0)
result = f(self, *args, **kwargs)
if apply_axis is None and not transpose:
result._deferred_index = self._deferred_index
result._deferred_column = self._deferred_column
elif apply_axis is None and transpose:
result._deferred_index = self._deferred_column
result._deferred_column = self._deferred_index
elif apply_axis == "opposite":
if axis == 0:
result._deferred_index = self._deferred_index
else:
result._deferred_column = self._deferred_column
elif apply_axis == "rows":
result._deferred_column = self._deferred_column
return result
return run_f_on_minimally_updated_metadata
return decorator
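# Editor's note (illustrative only, not part of the original module): methods of
# PandasDataframe below are wrapped with this decorator so that deferred
# index/column metadata is synchronized before the body runs; a hypothetical
# full-axis method would be declared as
#
#     @lazy_metadata_decorator(apply_axis="both")
#     def some_full_axis_method(self, func):
#         ...  # partitions are guaranteed to carry up-to-date labels here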
class PandasDataframe(object, metaclass=LoggerMetaClass):
"""
An abstract class that represents the parent class for any pandas storage format dataframe class.
This class provides interfaces to run operations on dataframe partitions.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = None
_query_compiler_cls = PandasQueryCompiler
# These properties flag whether or not we are deferring the metadata synchronization
_deferred_index = False
_deferred_column = False
@property
def __constructor__(self):
"""
Create a new instance of this object.
Returns
-------
PandasDataframe
"""
return type(self)
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
):
self._partitions = partitions
self._index_cache = ensure_index(index)
self._columns_cache = ensure_index(columns)
"""
This example reads texts and returns the average GloVe embedding for each sentence.
>>> get_features(
>>> tweet_samples,
>>> embedding=WordEmbedding(model),
>>> preprocessor=TweetPreprocessor(normalize=['link', 'mention']),
>>> tokenizer=TweetTokenizer()
>>> ).shape
>>> (5, 100)
"""
from typing import List, Text
import gensim.downloader as api
import pandas as pd
from nltk import TweetTokenizer
from tklearn.embedding import WordEmbedding
from tklearn.feature_extraction import make_embedding_transformer
from tklearn.preprocessing import TweetPreprocessor, TextPreprocessor
model = api.load('glove-twitter-100')
def get_features(texts: List[Text], embedding: WordEmbedding, preprocessor: TextPreprocessor, tokenizer):
pp, tk = preprocessor, tokenizer
tokenized_texts = pd.Series(texts)
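# Editor's sketch (the original snippet stops at the line above): one plausible
# completion preprocesses, tokenizes and averages the word vectors of each text.
# It is kept entirely in comments because the preprocess and word-lookup calls
# are assumptions about the tklearn API, not confirmed by the source:
#
#     tokens_per_text = tokenized_texts.apply(pp.preprocess).apply(tk.tokenize)
#     return np.vstack([
#         np.mean([embedding.word_vec(w) for w in toks if w in embedding], axis=0)
#         for toks in tokens_per_text
#     ])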
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/subtracting offsets, as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/subtracting offsets, as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
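# rebuilding the index from its raw i8 values with freq='infer' should recover the original frequency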
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
| pd.offsets.Minute() | pandas.offsets.Minute |
import pysam
import pandas as pd
import os
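# the "snakemake" object (input/output/params) is injected when this script runs from a Snakemake rule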
input_files = snakemake.input
df = pd.DataFrame(columns=["Sample"])
for sample_file in input_files:
variant_file = pysam.VariantFile(sample_file)
sample_name = os.path.basename(sample_file).split(".")[0]
gene_variant_dict = {"Sample": [sample_name]}
for rec in variant_file.fetch():
for sample in rec.samples:
allele_frequencies = rec.samples[sample]["AF"] #Can be multiple entries
for allele_frequency in allele_frequencies:
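# a non-zero SVLEN marks an insertion/deletion; otherwise the record is counted as an SNV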
variant = rec.info["SVLEN"]
if variant[0]:
variant_type = "INDEL"
else:
variant_type = "SNV"
transcripts = rec.info["ANN"]
for transcript in transcripts:
gene = transcript.split("|")[3]
if gene not in gene_variant_dict:
gene_variant_dict[gene] = set()
gene_variant_dict[gene].add(variant_type)
break
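# join each gene's variant types into a comma-separated string before building the one-row dataframe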
for key, value in gene_variant_dict.items():
gene_variant_dict[key] = ','.join(value)
sample_df = pd.DataFrame(gene_variant_dict, index=[0])
df = | pd.concat([df, sample_df], join="outer", ignore_index=False, sort=False) | pandas.concat |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
"""
# Benchmarks
We benchmark GPflux' Deep GP on several UCI datasets.
The code to run the experiments can be found in `benchmarking/main.py`. The results are stored in `benchmarking/runs/*.json`. In this script we aggregate and plot the outcomes.
"""
# %% {"nbsphinx": "hidden"}
import glob
import json
import numpy as np
import pandas as pd
# %% {"nbsphinx": "hidden"}
LOGS = "../../benchmarking/runs/*.json"
data = []
for path in glob.glob(LOGS):
with open(path) as json_file:
data.append(json.load(json_file))
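# collect every run's JSON record so the results can be aggregated into a single dataframe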
df = | pd.DataFrame.from_records(data) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
import json
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('averaged_perceptron_tagger')
import spacy
import math
import string
import sys
import random
from collections import Counter
from itertools import chain
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ge=pd.read_csv('./testgoodwordse.csv')
gn=pd.read_csv('./testgoodwordsn.csv')
gc=pd.read_csv('./testgoodwordsc.csv')
be=pd.read_csv('./testbadwordse.csv')
bn=pd.read_csv('./testbadwordsn.csv')
bc=pd.read_csv('./testbadwordsc.csv')
glabel=pd.read_csv('./testgoodwords.csv')
blabel=pd.read_csv('./testbadwords.csv')
nlp = spacy.load("en_trf_bertbaseuncased_lg")
def find_ngrams(input_list, n):
return list(zip(*[input_list[i:] for i in range(n)]))
data = blabel['fullnopunc'].to_list()
df = pd.DataFrame(data,columns=['sentence'])
df = df.dropna(thresh=1)
df['trigrams'] = df['sentence'].map(lambda x: find_ngrams(x.split(" "), 3))
trigrams = df['trigrams'].tolist()
trigrams = list(chain(*trigrams))
trigrams = [(x.lower(), y.lower(),z.lower()) for x,y,z in trigrams]
trigram_counts = Counter(trigrams)
tri=pd.Series(trigram_counts)
datae = be['fullnopunc'].to_list()
dfe = pd.DataFrame(datae,columns=['sentence'])
dfe = dfe.dropna(thresh=1)
dfe['trigrams'] = dfe['sentence'].map(lambda x: find_ngrams(x.split(" "), 3))
datan = bn['fullnopunc'].to_list()
dfn = pd.DataFrame(datan,columns=['sentence'])
dfn = dfn.dropna(thresh=1)
dfn['trigrams'] = dfn['sentence'].map(lambda x: find_ngrams(x.split(" "), 3))
datac = bc['fullnopunc'].to_list()
dfc = pd.DataFrame(datac,columns=['sentence'])
dfc = dfc.dropna(thresh=1)
dfc['trigrams'] = dfc['sentence'].map(lambda x: find_ngrams(x.split(" "), 3))
dfb=pd.DataFrame(tri)
dfb['sentence'] = [' '.join(map(str,i)) for i in dfb.index.tolist()]
arrv=[]
arrsd=[]
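# for each trigram from the bad-label sentences, count its occurrences in the e/n/c subsets and record the variance/std of those counts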
for i in tqdm(range(len(dfb))):
arr=[]
c1=0
c2=0
c3=0
s=dfb.iloc[i]['sentence']
bg=s.split()
for j in range(len(dfe)):
x=dfe.iloc[j]['trigrams']
for k in range(len(x)):
if(bg==list(x[k])):
c1=c1+1
for j in range(len(dfn)):
x=dfn.iloc[j]['trigrams']
for k in range(len(x)):
if(bg==list(x[k])):
c2=c2+1
for j in range(len(dfc)):
x=dfc.iloc[j]['trigrams']
for k in range(len(x)):
if(bg==list(x[k])):
c3=c3+1
if(c1+c2+c3>1):
arr.append(c1)
arr.append(c2)
arr.append(c3)
v=pd.Series(arr).var()
sd=pd.Series(arr).std()
arrv.append(v)
arrsd.append(sd)
else:
arrv.append(0)
arrsd.append(0)
btgv=arrv
btgstd=arrsd
print(pd.Series(arrv).sum()/len(df))
print(pd.Series(arrsd).sum()/len(df))
badvar= | pd.DataFrame(data=btgv,columns=['var']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import warnings
from sklearn import metrics
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.calibration import CalibratedClassifierCV
from .preprocessing import horizontal_to_camera
def plot_regressor_confusion(
performance_df,
log_xy=True,
log_z=True,
ax=None,
label_column='label',
prediction_column='label_prediction',
energy_unit='GeV'
):
ax = ax or plt.gca()
label = performance_df[label_column].copy()
prediction = performance_df[prediction_column].copy()
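# optionally compare true and estimated energy in log10 space before histogramming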
if log_xy is True:
label = np.log10(label)
prediction = np.log10(prediction)
limits = [
min(prediction.min(), label.min()),
max(prediction.max(), label.max()),
]
counts, x_edges, y_edges, img = ax.hist2d(
label,
prediction,
bins=[100, 100],
range=[limits, limits],
norm=LogNorm() if log_z is True else None,
)
img.set_rasterized(True)
ax.set_aspect(1)
ax.figure.colorbar(img, ax=ax)
if log_xy is True:
ax.set_xlabel(
rf'$\log_{{10}}(E_{{\mathrm{{MC}}}} \,\, / \,\, \mathrm{{{energy_unit}}})$'
)
ax.set_ylabel(
rf'$\log_{{10}}(E_{{\mathrm{{Est}}}} \,\, / \,\, \mathrm{{{energy_unit}}})$'
)
else:
ax.set_xlabel(
rf'$E_{{\mathrm{{MC}}}} \,\, / \,\, \mathrm{{{energy_unit}}}$'
)
ax.set_ylabel(
rf'$E_{{\mathrm{{Est}}}} \,\, / \,\, \mathrm{{{energy_unit}}}$'
)
return ax
def plot_bias_resolution(
performance_df,
bins=10,
ax=None,
label_column='label',
prediction_column='label_prediction',
energy_unit='GeV'
):
df = performance_df.copy()
ax = ax or plt.gca()
if np.isscalar(bins):
bins = np.logspace(
np.log10(df[label_column].min()),
np.log10(df[label_column].max()),
bins + 1
)
df['bin'] = np.digitize(df[label_column], bins)
df['rel_error'] = (df[prediction_column] - df[label_column]) / df[label_column]
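# bias and resolution are computed from the relative error within each logarithmic energy bin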
binned = pd.DataFrame(index=np.arange(1, len(bins)))
binned['center'] = 0.5 * (bins[:-1] + bins[1:])
binned['width'] = np.diff(bins)
grouped = df.groupby('bin')
binned['bias'] = grouped['rel_error'].mean()
binned['bias_median'] = grouped['rel_error'].median()
binned['lower_sigma'] = grouped['rel_error'].agg(lambda s: np.percentile(s, 15))
binned['upper_sigma'] = grouped['rel_error'].agg(lambda s: np.percentile(s, 85))
binned['resolution_quantiles'] = (binned.upper_sigma - binned.lower_sigma) / 2
binned['resolution'] = grouped['rel_error'].std()
binned = binned[grouped.size() > 100]  # require at least 100 events per bin
for key in ('bias', 'resolution', 'resolution_quantiles'):
if matplotlib.get_backend() == 'pgf' or plt.rcParams['text.usetex']:
label = key.replace('_', r'\_')
else:
label = key
ax.errorbar(
binned['center'],
binned[key],
xerr=0.5 * binned['width'],
label=label,
linestyle='',
)
ax.legend()
ax.set_xscale('log')
ax.set_xlabel(
rf'$\log_{{10}}(E_{{\mathrm{{MC}}}} \,\, / \,\, \mathrm{{{energy_unit}}})$'
)
return ax
def plot_roc(
performance_df,
model,
ax=None,
label_column='label',
score_column='scores',
):
ax = ax or plt.gca()
ax.axvline(0, color='lightgray')
ax.axvline(1, color='lightgray')
ax.axhline(0, color='lightgray')
ax.axhline(1, color='lightgray')
roc_aucs = []
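# draw one ROC curve per cross-validation fold plus the curve of the pooled predictions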
mean_fpr, mean_tpr, _ = metrics.roc_curve(
performance_df[label_column],
performance_df[score_column],
)
for it, df in performance_df.groupby('cv_fold'):
fpr, tpr, _ = metrics.roc_curve(df[label_column], df[score_column])
roc_aucs.append(metrics.roc_auc_score(df[label_column], df[score_column]))
ax.plot(
fpr, tpr,
color='lightgray', lw=0.66 * plt.rcParams['lines.linewidth'],
label='Single CV ROC Curve' if it == 0 else None
)
ax.set_title('Mean area under curve: {:.4f} ± {:.4f}'.format(
np.mean(roc_aucs), np.std(roc_aucs)
))
ax.plot(mean_fpr, mean_tpr, label='Mean ROC curve')
ax.legend()
ax.set_aspect(1)
ax.set_xlabel('false positive rate')
ax.set_ylabel('true positive rate')
ax.figure.tight_layout()
return ax
def plot_scores(
performance_df,
model,
ax=None,
xlabel='score',
classnames={0: 'Proton', 1: 'Gamma'},
label_column='label',
score_column='score',
):
ax = ax or plt.gca()
if isinstance(model, CalibratedClassifierCV):
model = model.base_estimator
n_bins = (model.n_estimators + 1) if hasattr(model, 'n_estimators') else 100
bin_edges = np.linspace(
performance_df[score_column].min(),
performance_df[score_column].max(),
n_bins + 1,
)
for label, df in performance_df.groupby(label_column):
ax.hist(
df[score_column],
bins=bin_edges, label=classnames[label], histtype='step',
)
ax.set_xlabel(xlabel)
ax.legend()
ax.figure.tight_layout()
def plot_precision_recall(performance_df, model, score_column='score', ax=None, beta=0.1):
ax = ax or plt.gca()
if isinstance(model, CalibratedClassifierCV):
model = model.base_estimator
n_bins = (model.n_estimators + 1) if hasattr(model, 'n_estimators') else 100
thresholds = np.linspace(0, 1, n_bins + 1)
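# sweep the decision threshold and record precision, recall and the f_beta score at each step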
precision = []
recall = []
f_beta = []
ax.axvline(0, color='lightgray')
ax.axvline(1, color='lightgray')
ax.axhline(0, color='lightgray')
ax.axhline(1, color='lightgray')
for threshold in thresholds:
prediction = (performance_df[score_column] >= threshold).astype('int')
label = performance_df.label.values
precision.append(metrics.precision_score(label, prediction))
recall.append(metrics.recall_score(label, prediction))
f_beta.append(metrics.fbeta_score(label, prediction, beta=beta))
ax.plot(thresholds, precision, label='precision')
ax.plot(thresholds, recall, label='recall')
ax.plot(thresholds, f_beta, label='$f_{{{:.2f}}}$'.format(beta))
ax.legend()
ax.set_xlabel('prediction threshold')
ax.figure.tight_layout()
def plot_feature_importances(model, feature_names, ax=None, max_features=20):
ax = ax or plt.gca()
ypos = np.arange(1, len(feature_names[:max_features]) + 1)
if plt.rcParams['text.usetex'] or matplotlib.get_backend() == 'pgf':
feature_names = [f.replace('_', r'\_') for f in feature_names]
feature_names = np.array(feature_names)
if isinstance(model, CalibratedClassifierCV):
model = model.base_estimator
if hasattr(model, 'estimators_'):
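# for ensemble models, show the spread of feature importances across the individual estimators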
feature_importances = np.array([
est.feature_importances_
for est in np.array(model.estimators_).ravel()
])
idx = np.argsort(np.median(feature_importances, axis=0))[-max_features:]
ax.boxplot(
feature_importances[:, idx],
vert=False,
sym='', # no outliers
medianprops={'color': 'C0'}
)
y_jittered = np.random.normal(ypos, 0.1, size=feature_importances[:, idx].shape)
for imp, y in zip(feature_importances.T[idx], y_jittered.T):
res = ax.scatter(imp, y, color='C1', alpha=0.5, lw=0, s=5)
res.set_rasterized(True)
else:
feature_importances = model.feature_importances_
idx = np.argsort(feature_importances)[-max_features:]
ax.barh(
ypos,
feature_importances[idx]
)
ax.set_ylim(ypos[0] - 0.5, ypos[-1] + 0.5)
ax.set_yticks(ypos)
ax.set_yticklabels(feature_names[idx])
ax.set_xlabel('Feature importance')
if len(feature_names) > max_features:
ax.set_title('The {} most important features'.format(max_features))
ax.figure.tight_layout()
def plot_true_delta_delta(data_df, model_config, ax=None):
df = data_df.copy()
source_x, source_y = horizontal_to_camera(df, model_config)
true_delta = np.arctan2(
source_y - df[model_config.cog_y_column],
source_x - df[model_config.cog_x_column],
)
ax.hist(true_delta - df[model_config.delta_column], bins=100, histtype='step')
ax.figure.tight_layout()
ax.set_xlabel(r'$\delta_{true}\,-\,\delta$')
return ax
def plot_energy_dependent_disp_metrics(df, true_energy_column, energy_unit='GeV', fig=None):
df = df.copy()
edges = 10**np.arange(
np.log10(df[true_energy_column].min()),
np.log10(df[true_energy_column].max()),
0.2
)
df['bin_idx'] = np.digitize(df[true_energy_column], edges)
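# per energy bin: accuracy of the predicted sign and R² of the predicted |disp|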
def accuracy(group):
return metrics.accuracy_score(
group.sign,
group.sign_prediction,
)
def r2(group):
return metrics.r2_score(
np.abs(group.disp),
group.disp_prediction,
)
# discard under and overflow
df = df[(df['bin_idx'] != 0) & (df['bin_idx'] != len(edges))]
binned = pd.DataFrame({
'e_center': 0.5 * (edges[1:] + edges[:-1]),
'e_low': edges[:-1],
'e_high': edges[1:],
'e_width': np.diff(edges),
}, index=pd.Series(np.arange(1, len(edges)), name='bin_idx'))
r2_scores = pd.DataFrame(index=binned.index)
accuracies = | pd.DataFrame(index=binned.index) | pandas.DataFrame |
import os
import sys
import cv2
import glob
import hashlib
import numpy as np
import pandas as pd
from filelock import FileLock
from multiprocessing.pool import ThreadPool
import uoimdb as uo
from uoimdb.tagging.image_processing import ImageProcessor
from uoimdb.tagging.app import user_col
import traceback
class RandomSamples(object):
def __init__(self, imdb, image_processor=None):
self.imdb = imdb
self.image_processor = image_processor
self.random_sample_dir = imdb.cfg.RANDOM_SAMPLE_LOCATION
uo.utils.ensure_dir(self.random_sample_dir)
self.samples = {}
self._permutations = {} # stores the shuffle order for each user+sample
def load_samples(self, name='*'):
for f in glob.glob(os.path.join(self.random_sample_dir, '{}.csv'.format(name))):
self.load_sample(f) # loads into self.samples[name]
def load_sample(self, filename):
df = pd.read_csv(filename, index_col='src').fillna('')
name = os.path.splitext(os.path.basename(filename))[0]
for user in self.imdb.cfg.USERS:
# create columns that don't exist. resilient to new users
col = user_col('status', user)
if not col in df.columns:
df[col] = ''
# create sample shuffle order for all users
perm_id = user_col(name, user)
seed = int(hashlib.md5(perm_id.encode('utf-8')).hexdigest(), 16) % (2**32 - 1)
self._permutations[perm_id] = np.random.RandomState(seed=seed).permutation(len(df))
self.samples[name] = df
def user_sample_order(self, name, user=None):
order = self._permutations[user_col(name, user=user)]
return self.samples[name].iloc[order]
def create_sample(self, name, n, time_range=None, distance_to_gap=None,
overlap_existing=None, n_samples=None, overlap_ratio=None):
imdb = self.imdb
df = imdb.df
# filter by time range
if time_range is None:
time_range = imdb.cfg.FILTER_TIME_RANGE
if time_range:
morning, evening = time_range
if morning:
df = df[df.date.dt.hour >= morning]
if evening:
df = df[df.date.dt.hour < evening]
# dont take images close to a time gap
if distance_to_gap is None:
distance_to_gap = int(imdb.cfg.DISTANCE_FROM_GAP)
if distance_to_gap:
df = df[df.distance_to_gap >= distance_to_gap]
# create sample independent of other samples
if not overlap_existing:
for _, sample in self.samples.items():
df = df.drop(index=sample.index, errors='ignore')
if not len(df):
return False
names = []
if n_samples: # create several overlapping samples
overlap_ratio = float(overlap_ratio or imdb.cfg.SAMPLE_OVERLAP_RATIO)
n_needed = int(n * (n_samples - (n_samples - 1) * overlap_ratio))
full_sample = df.sample(n=min(n_needed, len(df))).index
for i in range(n_samples):
start = int(i * n * (1 - overlap_ratio))
sample = full_sample[start:start + n]
if len(sample):
sample_name = '{}-{}'.format(name, i+1)
self._create_sample_file(sample_name, sample)
names.append(sample_name)
else: # create a single sample
sample = df.sample(n=min(n, len(df))).index
self._create_sample_file(name, sample)
names.append(name)
return names
def _create_sample_file(self, name, sample):
filename = os.path.join(self.random_sample_dir, '{}.csv'.format(name))
# I know this isn't the most efficient way, but I'm creating user cols in load_..
pd.DataFrame(columns=[], index=sample).to_csv(filename)
self.load_sample(filename)
def save_user_sample(self, name):
filename = os.path.join(self.random_sample_dir, '{}.csv'.format(name))
my_sample = self.samples[name]
with FileLock(filename + '.lock', timeout=3): # handle concurrent access
# load existing sample
sample = pd.read_csv(filename, index_col='src')
# update all user columns
for col in my_sample.columns:
if col.startswith(user_col('')):
sample[col] = my_sample[col]
sample.to_csv(filename)
def delete_sample(self, sample):
for f in glob.glob(os.path.join(self.random_sample_dir, '{}.csv'.format(sample))):
name = os.path.splitext(os.path.basename(f))[0]
if name in self.samples:
del self.samples[name]
if os.path.isfile(f):
os.remove(f)
def gather_sample_srcs(self, sample, window=None):
sample_srcs = []
for f in glob.glob(os.path.join(self.random_sample_dir, '{}.csv'.format(sample))):
idx = | pd.read_csv(f, index_col='src') | pandas.read_csv |
## 1. Introduction ##
import pandas as pd
happiness2015 = pd.read_csv("World_Happiness_2015.csv")
happiness2016 = pd.read_csv("World_Happiness_2016.csv")
happiness2017 = pd.read_csv("World_Happiness_2017.csv")
happiness2015['Year'] = 2015
happiness2016['Year'] = 2016
happiness2017['Year'] = 2017
## 2. Combining Dataframes with the Concat Function ##
head_2015 = happiness2015[['Country','Happiness Score', 'Year']].head(3)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_axis0 = pd.concat([head_2015, head_2016])
concat_axis1 = pd.concat([head_2015, head_2016], axis=1)
question1 = 6
question2 = 3
## 3. Combining Dataframes with the Concat Function Continued ##
head_2015 = happiness2015[['Year','Country','Happiness Score', 'Standard Error']].head(4)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_axis0 = pd.concat([head_2015, head_2016])
rows = 7
columns = 4
## 4. Combining Dataframes with Different Shapes Using the Concat Function ##
head_2015 = happiness2015[['Year','Country','Happiness Score', 'Standard Error']].head(4)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_update_index = pd.concat([head_2015, head_2016], ignore_index=True)
## 5. Joining Dataframes with the Merge Function ##
three_2015 = happiness2015[['Country','Happiness Rank','Year']].iloc[2:5]
three_2016 = happiness2016[['Country','Happiness Rank','Year']].iloc[2:5]
merged = | pd.merge(left=three_2015, right=three_2016, on='Country') | pandas.merge |
from argparse import ArgumentParser
import os, sys
import cv2
import numpy as np
import pandas as pd
import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer, loggers
from torchsummary import summary
import torch.nn.functional as F
sys.path.append('../../loaders/pytorch_lightning/')
from datamodule import DataModule
from models.autoencoder import Autoencoder
from config import hparams
def train(lightning_model, hparams, logger):
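# load the pre-generated split for the configured season/dataset and parse its timestamps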
train = pd.read_csv("../../splits/{}_{}_5000.csv".format(hparams.season, hparams.dataset))
train["DateTime"] = | pd.to_datetime(train['DateTime']) | pandas.to_datetime |
"""
Estimate results, inc. economic impacts.
Written by <NAME>.
February 2022.
"""
import os
import configparser
import pandas as pd
from tqdm import tqdm
import numpy as np
import geopandas as gpd
import rasterio
import random
from misc import params, technologies, get_countries, get_regions, get_scenarios
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
RESULTS = os.path.join(BASE_PATH, '..', 'results')
def query_hazard_layers(country, regions, technologies, scenarios):
"""
Query each hazard layer and estimate fragility.
"""
iso3 = country['iso3']
name = country['country']
regional_level = country['lowest']
gid_level = 'GID_{}'.format(regional_level)
filename = 'fragility_curve.csv'
path_fragility = os.path.join(DATA_RAW, filename)
f_curve = pd.read_csv(path_fragility)
f_curve = f_curve.to_dict('records')
for scenario in scenarios:
# if not scenario == 'data\processed\MWI\hazards\inunriver_rcp8p5_MIROC-ESM-CHEM_2080_rp01000.tif':
# continue
for technology in technologies:
output = []
for idx, region in regions.iterrows():
gid_id = region[gid_level]
scenario_name = os.path.basename(scenario)
# if not gid_id == 'MWI.13.12_1':
# continue
filename = '{}_{}_{}.shp'.format(gid_id, technology, scenario_name)
folder_out = os.path.join(DATA_PROCESSED, iso3, 'regional_data', gid_id, 'scenarios', scenario_name)
path_output = os.path.join(folder_out, filename)
if os.path.exists(path_output):
continue
filename = '{}_{}.shp'.format(technology, gid_id)
folder = os.path.join(DATA_PROCESSED, iso3, 'regional_data', gid_id, 'sites')
path = os.path.join(folder, filename)
if not os.path.exists(path):
continue
sites = gpd.read_file(path, crs='epsg:4326')#[:1]
failures = 0
for idx, site in sites.iterrows():
with rasterio.open(scenario) as src:
src.kwargs = {'nodata':255}
coords = [(site['geometry'].x, site['geometry'].y)]
depth = [sample[0] for sample in src.sample(coords)][0]
fragility = query_fragility_curve(f_curve, depth)
failure_prob = random.uniform(0, 1)
failed = (1 if failure_prob < fragility else 0)
if fragility > 0:
failures += 1
output.append({
'type': 'Feature',
'geometry': site['geometry'],
'properties': {
'radio': site['radio'],
'mcc': site['mcc'],
'net': site['net'],
'area': site['area'],
'cell': site['cell'],
'gid_level': gid_level,
'gid_id': region[gid_level],
'depth': depth,
'scenario': scenario_name,
'fragility': fragility,
'fail_prob': failure_prob,
'failure': failed,
'cost_usd': round(100000 * fragility),
# 'cell_id': site['cell_id'],
},
})
if len(output) == 0:
return
if not os.path.exists(folder_out):
os.makedirs(folder_out)
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
output.to_file(path_output, crs='epsg:4326')
return
def query_fragility_curve(f_curve, depth):
"""
Query the fragility curve.
"""
if depth < 0:
return 0
for item in f_curve:
if item['depth_lower_m'] <= depth < item['depth_upper_m']:
return item['fragility']
else:
continue
print('fragility curve failure: {}'.format(depth))
return 0
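# Illustrative check of the curve lookup above, using a made-up two-step curve
# (the dict keys mirror those expected from fragility_curve.csv):
def _example_query_fragility_curve():
    example_curve = [
        {'depth_lower_m': 0.0, 'depth_upper_m': 1.0, 'fragility': 0.2},
        {'depth_lower_m': 1.0, 'depth_upper_m': 10.0, 'fragility': 0.8},
    ]
    assert query_fragility_curve(example_curve, 0.5) == 0.2
    assert query_fragility_curve(example_curve, 2.0) == 0.8
    assert query_fragility_curve(example_curve, -1.0) == 0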
def econ_interim_impacts(country, technologies, scenarios):
"""
Estimate economic impacts.
Aqueduct flood scenarios are structured as follows:
floodtype_climatescenario_subsidence_year_returnperiod_projection
"""
iso3 = country['iso3']
filename = 'econ_interim_{}.csv'.format(iso3)
folder_out = os.path.join(RESULTS)
path_output = os.path.join(folder_out, filename)
# if os.path.exists(path_output):
# return
output = []
for scenario in tqdm(scenarios):
for technology in technologies:
scenario = os.path.basename(scenario)
filename = 'sites_{}_{}.csv'.format(technology, scenario)
folder = os.path.join(DATA_PROCESSED, country['iso3'], 'failed_sites')
path = os.path.join(folder, filename)
data = | pd.read_csv(path) | pandas.read_csv |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import collections
import urllib.parse
import pkg_resources
import itertools
import qiime2
import skbio
import skbio.diversity
import scipy.spatial.distance
import numpy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import q2templates
from statsmodels.sandbox.stats.multicomp import multipletests
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta')
def bioenv(output_dir: str, distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.Metadata) -> None:
# convert metadata to numeric values where applicable, drop the non-numeric
# values, and then drop samples that contain NaNs
df = metadata.to_dataframe()
df = df.apply(lambda x: | pd.to_numeric(x, errors='ignore') | pandas.to_numeric |
import os
import sys
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
from dateutil.relativedelta import relativedelta
pkg_dir = os.path.join(os.path.dirname(__file__),'..')
sys.path.append(pkg_dir)
from silverpieces.functions import *
def fill_time_index(nd_array):
td = nd_array.shape[0]
for i in range(td):
nd_array[i,:,:] = i
def fill_year(nd_array):
start_time = datetime(2001,1,1)
td = nd_array.shape[0]
for i in range(td):
nd_array[i,:,:] = (start_time + relativedelta(days=i)).year - start_time.year
def create_daily_sp_cube(start_time, end_time, nx=2, ny=3, fun_fill=fill_time_index):
start_time = pd.to_datetime(start_time)
end_time = pd.to_datetime(end_time)
tdim = pd.date_range(start=start_time, end=end_time, freq='D')
xdim = np.arange(0, nx * 0.5 - 1e-2, 0.5)
ydim = np.arange(0.25, 0.25 + ny * 0.5 - 1e-2, 0.5)
x = np.empty([len(tdim), ny, nx])
fun_fill(x)
y = xr.DataArray(x,
coords=[tdim,ydim,xdim],
dims=['time', 'lat', 'lon'],
name='test_daily_data')
return y
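def test_create_daily_sp_cube_shape():
    # Minimal shape check for the synthetic cube helper above: 3 days on a 2x3 grid,
    # filled with the day index by the default fill function.
    y = create_daily_sp_cube('2001-01-01', '2001-01-03', nx=2, ny=3)
    assert y.shape == (3, 3, 2)  # (time, lat, lon)
    assert float(y[1, 0, 0]) == 1.0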
def test_num_year_detection():
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2001-12-31') == 2
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2001-10-31') == 2
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-03-01', '2001-12-31') == 2
#
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2002-12-31') == 1
assert max_shifting_years('2001-01-01', '2003-12-30', '2001-01-01', '2002-12-31') == 0
# If the windows is less than a calendar year, it should still find the right max shift
assert max_shifting_years('2007-01-01', '2018-12-31', '2001-01-01', '2001-12-31') == 11
# 2016 a leap year...
assert max_shifting_years('2007-01-01', '2018-12-31', '2016-01-01', '2016-12-31') == 11
def test_periods_stat_yearly_stats():
start_time = pd.to_datetime('2001-01-01')
end_time = pd.to_datetime('2002-12-31')
x = create_daily_sp_cube('2001-01-01', '2009-12-31', nx=2, ny=3, fun_fill=fill_year)
s = SpatialTemporalDataArrayStat()
y = s.periods_stat_yearly(x, '2001-01-01', '2002-12-31')
assert len(y.time) == (9 - 2 + 1)
tdim = y[s.time_dimname].values
assert pd.to_datetime(tdim[0] ) == end_time
assert pd.to_datetime(tdim[-1]) == pd.to_datetime('2009-12-31')
assert np.all(y[0,:,:] == 365 * 1.0)
assert np.all(y[1,:,:] == 365 * (1.0 + 2.0))
y = s.periods_stat_yearly(x, '2001-01-01', '2002-12-31', func = np.mean)
assert np.all(y[0,:,:] == 0.5)
assert np.all(y[1,:,:] == 1.5)
y = s.periods_stat_yearly(x, '2004-01-01', '2005-12-31')
assert len(y.time) == (9 - 2 + 1)
tdim = y[s.time_dimname].values
assert pd.to_datetime(tdim[0] ) == end_time
def test_periods_stat_yearly_stats_leap_years():
# Noticed a bug in a notebook; it turned out to involve a leap year.
x = create_daily_sp_cube('2007-01-01', '2018-12-31', nx=2, ny=3, fun_fill=fill_year)
s = SpatialTemporalDataArrayStat()
y = s.periods_stat_yearly(x, '2016-01-01', '2016-12-31')
assert len(y.time) == 12
tdim = y[s.time_dimname].values
assert pd.to_datetime(tdim[0]) == | pd.to_datetime('2007-12-31') | pandas.to_datetime |
import os
import ast
import math
import json
import logging
import pathlib
import numpy as np
import pandas as pd
import opendssdirect as dss
from .pydss_parameters import *
from jade.utils.timing_utils import track_timing, Timer
from disco import timer_stats_collector
from disco.enums import LoadMultiplierType
from disco.exceptions import (
OpenDssCompileError,
OpenDssConvergenceError,
UpgradesExternalCatalogRequired,
UpgradesExternalCatalogMissingObjectDefinition,
InvalidOpenDssElementError,
)
logger = logging.getLogger(__name__)
@track_timing(timer_stats_collector)
def reload_dss_circuit(dss_file_list, commands_list=None, **kwargs):
"""This function clears the circuit and loads dss files and commands.
Also solves the circuit and checks for convergence errors
Parameters
----------
dss_file_list
commands_list
Returns
-------
"""
logger.info("-> Reloading OpenDSS circuit")
check_dss_run_command("clear")
if dss_file_list is None:
raise Exception("No OpenDSS files have been passed to be loaded.")
for dss_file in dss_file_list:
logger.info(f"Redirecting {dss_file}.")
check_dss_run_command(f"Redirect {dss_file}")
dc_ac_ratio = kwargs.get('dc_ac_ratio', None)
if dc_ac_ratio is not None:
change_pv_pctpmpp(dc_ac_ratio=dc_ac_ratio)
if commands_list is not None:
logger.info(f"Running {len(commands_list)} dss commands")
for command_string in commands_list:
check_dss_run_command(command_string)
if "new " in command_string.lower():
check_dss_run_command("CalcVoltageBases")
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve:
pydss_params = define_initial_pydss_settings(**kwargs)
circuit_solve_and_check(raise_exception=True, **pydss_params)
return pydss_params
else:
circuit_solve_and_check(raise_exception=True)
return kwargs
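# Hypothetical usage sketch of reload_dss_circuit (the file names and the edit command
# below are placeholders, not files or elements shipped with this package):
# reload_dss_circuit(
#     dss_file_list=["master.dss", "thermal_upgrades.dss"],
#     commands_list=["New Capacitor.example_cap bus1=example_bus kv=12.47 kvar=600"],
#     dc_ac_ratio=1.2,
#     enable_pydss_solve=False,
# )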
def run_selective_master_dss(master_filepath, **kwargs):
"""This function executes master.dss file line by line and ignores some commands that Solve yearly mode,
export or plot data.
Parameters
----------
master_filepath
Returns
-------
"""
run_dir = os.getcwd()
check_dss_run_command("Clear")
# logger.info("-->Redirecting master file:")
# check_dss_run_command(f"Redirect {master_filepath}")
# do this instead of redirect master to ignore some lines (e.g., that solve for the whole year)
os.chdir(os.path.dirname(master_filepath))
logger.debug(master_filepath)
with open(master_filepath, "r") as fr:
tlines = fr.readlines()
for line in tlines:
if ('Solve'.lower() in line.lower()) or ('Export'.lower() in line.lower()) or ('Plot'.lower() in line.lower()):
logger.info(f"Skipping this line: {line}")
continue
else:
check_dss_run_command(f"{line}")
circuit_solve_and_check(raise_exception=True, **kwargs)
os.chdir(run_dir)
return
@track_timing(timer_stats_collector)
def circuit_solve_and_check(raise_exception=False, **kwargs):
"""This function solves the circuit (both OpenDSS and PyDSS-if enabled)
and can raise exception if convergence error occurs
Parameters
----------
raise_exception
kwargs
Returns
-------
"""
calcvoltagebases = kwargs.pop("calcvoltagebases", False)
if calcvoltagebases:
check_dss_run_command("CalcVoltageBases")
dss_pass_flag = dss_solve_and_check(raise_exception=raise_exception)
pass_flag = dss_pass_flag
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve: # if pydss solver is also to be used
pydss_pass_flag = pydss_solve_and_check(raise_exception=raise_exception, **kwargs)
pass_flag = dss_pass_flag and pydss_pass_flag
return pass_flag
def dss_solve_and_check(raise_exception=False):
"""This function solves OpenDSS and returns bool flag which shows if it has converged or not.
Parameters
----------
raise_exception
Returns
-------
bool
"""
dss.Solution.Solve()
logger.debug("Solving circuit using OpenDSS")
# check_dss_run_command('CalcVoltageBases')
dss_pass_flag = dss.Solution.Converged()
if not dss_pass_flag:
logger.info(f"OpenDSS Convergence Error")
if raise_exception:
raise OpenDssConvergenceError("OpenDSS solution did not converge")
return dss_pass_flag
def dss_run_command_list(command_list):
for command_string in command_list:
check_dss_run_command(command_string)
return
def write_text_file(string_list, text_file_path):
"""This function writes the string contents of a list to a text file
Parameters
----------
string_list
text_file_path
Returns
-------
"""
pathlib.Path(text_file_path).write_text("\n".join(string_list))
def create_upgraded_master_dss(dss_file_list, upgraded_master_dss_filepath):
"""Function to create master dss with redirects to upgrades dss file.
The redirect paths in this file are relative to the file"""
command_list = []
for filename in dss_file_list:
rel_filename = os.path.relpath(filename, os.path.dirname(upgraded_master_dss_filepath))
command_list.append(f"Redirect {rel_filename}")
return command_list
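# Illustrative call (hypothetical POSIX paths): the redirect targets are rewritten
# relative to the directory of the new master file.
def _example_create_upgraded_master_dss():
    commands = create_upgraded_master_dss(
        dss_file_list=["/tmp/feeder/original_master.dss",
                       "/tmp/feeder/upgrades/thermal_upgrades.dss"],
        upgraded_master_dss_filepath="/tmp/feeder/upgrades/new_master.dss",
    )
    assert commands == ["Redirect ../original_master.dss", "Redirect thermal_upgrades.dss"]
    return commands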
def create_dataframe_from_nested_dict(user_dict, index_names):
"""This function creates dataframe from a nested dictionary
Parameters
----------
user_dict
index_names
Returns
-------
DataFrame
"""
df = pd.DataFrame.from_dict({(i, j): user_dict[i][j]
for i in user_dict.keys()
for j in user_dict[i].keys()},
orient='index')
df.index.names = index_names
return df.reset_index()
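# Small worked example (made-up data) of the nested-dict flattening above:
def _example_create_dataframe_from_nested_dict():
    nested = {
        "feeder1": {"line1": {"status": "overloaded"}, "line2": {"status": "normal"}},
        "feeder2": {"line3": {"status": "normal"}},
    }
    df = create_dataframe_from_nested_dict(nested, ["feeder", "line"])
    # df has one row per (feeder, line) pair with columns: feeder, line, status
    return df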
def get_dictionary_of_duplicates(df, subset, index_field):
"""This creates a mapping dictionary of duplicate indices in a dataframe
Parameters
----------
df
subset
index_field
Returns
-------
Dictionary
"""
df.set_index(index_field, inplace=True)
df = df[df.duplicated(keep=False, subset=subset)]
tuple_list = df.groupby(subset).apply(lambda x: tuple(x.index)).tolist()
mapping_dict = {v: tup[0] for tup in tuple_list for v in tup}
return mapping_dict
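# Small worked example (made-up catalog) of the duplicate mapping above: rows "b" and
# "c" share the same normamps value, so both keys map to the first of the pair ("b").
def _example_get_dictionary_of_duplicates():
    catalog = pd.DataFrame({"name": ["a", "b", "c"], "normamps": [100.0, 200.0, 200.0]})
    mapping = get_dictionary_of_duplicates(catalog, subset=["normamps"], index_field="name")
    assert mapping == {"b": "b", "c": "b"}
    return mapping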
def get_scenario_name(enable_pydss_solve, pydss_volt_var_model):
"""This function determines the controller scenario
Parameters
----------
enable_pydss_solve : bool
pydss_volt_var_model
Returns
-------
str
"""
if enable_pydss_solve:
# scenario = pydss_volt_var_model.control1 # TODO can read in name instead
scenario = "control_mode"
else:
scenario = "pf1"
return scenario
@track_timing(timer_stats_collector)
def change_pv_pctpmpp(dc_ac_ratio):
"""This function changes PV system pctpmpp based on passed dc-ac ratio
newpctpmpp = oldpctpmpp / dc_ac_ratio
"""
dss.PVsystems.First()
for i in range(dss.PVsystems.Count()):
newpctpmpp = int(dss.Properties.Value('%Pmpp')) / dc_ac_ratio
command_string = f"Edit PVSystem.{dss.PVsystems.Name()} %Pmpp={newpctpmpp}"
check_dss_run_command(command_string)
dss.PVsystems.Next()
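# Worked example of the derating above (hypothetical values): an existing %Pmpp of 100
# with dc_ac_ratio = 1.25 is edited to %Pmpp = 100 / 1.25 = 80.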
def get_feeder_stats(dss):
"""This function gives metadata stats for a feeder
Parameters
----------
dss
Returns
-------
dict
"""
load_kw = 0
load_kVABase = 0
pv_kw = 0
pv_kVARated = 0
load_df = dss.utils.loads_to_dataframe()
if len(load_df) > 0:
load_kw = load_df['kW'].sum()
load_kVABase = load_df['kVABase'].sum()
pv_df = dss.utils.pvsystems_to_dataframe()
if len(pv_df) > 0:
pv_kw = pv_df['kW'].sum()
pv_kVARated = pv_df['kVARated'].sum()
data_dict = {
'total_load(kVABase)': load_kVABase,
'total_load(kW)': load_kw,
'total_PV(kW)': pv_kw,
'total_PV(kVARated)': pv_kVARated,
}
return data_dict
def get_upgrade_stage_stats(dss, upgrade_stage, upgrade_type, xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
"""This function gives upgrade stage stats for a feeder
upgrade_stage can be Initial or Final
upgrade_type can be thermal or voltage
"""
final_dict = {"stage": upgrade_stage, "upgrade_type": upgrade_type}
ckt_info_dict = get_circuit_info()
final_dict["feeder_components"] = ckt_info_dict
final_dict["feeder_components"].update({
"num_nodes": dss.Circuit.NumNodes(),
"num_loads": dss.Loads.Count(),
"num_lines": dss.Lines.Count(),
"num_transformers": dss.Transformers.Count(),
"num_pv_systems": dss.PVsystems.Count(),
"num_capacitors": dss.Capacitors.Count(),
"num_regulators": dss.RegControls.Count(),
} )
equipment_dict = combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs)
final_dict.update(equipment_dict)
return final_dict
def combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
line_properties = kwargs.get("line_properties", None)
xfmr_properties = kwargs.get("xfmr_properties", None)
voltage_properties = kwargs.get("voltage_properties", None)
final_dict = {}
if line_properties is None:
line_properties = ['name', 'phases','normamps', 'kV', 'line_placement', 'length', 'units', 'max_amp_loading',
'max_per_unit_loading', 'status']
if xfmr_properties is None:
xfmr_properties = ['name', 'phases', 'windings', 'conns', 'kVs', 'kVAs', 'amp_limit_per_phase','max_amp_loading',
'max_per_unit_loading', 'status']
if voltage_properties is None:
voltage_properties = ['name', 'Max per unit voltage', 'Min per unit voltage', 'Overvoltage violation',
'Max voltage_deviation', 'Undervoltage violation', 'Min voltage_deviation']
# some file reformatting
if "conns" in xfmr_properties:
xfmr_loading_df["conns"] = xfmr_loading_df["conns"].apply(ast.literal_eval)
if "kVs" in xfmr_properties:
xfmr_loading_df["kVs"] = xfmr_loading_df["kVs"].apply(ast.literal_eval)
if "windings" in xfmr_properties:
xfmr_loading_df["windings"] = xfmr_loading_df["windings"].astype(int)
final_dict.update({"transformer_loading": xfmr_loading_df[xfmr_properties].to_dict(orient="records")})
final_dict.update({"line_loading": line_loading_df[line_properties].to_dict(orient="records")})
final_dict.update({"bus_voltage": bus_voltages_df[voltage_properties].to_dict(orient="records")})
return final_dict
def get_circuit_info():
"""This collects circuit information: source bus, feeder head info, substation xfmr information
Returns
-------
Dictionary
"""
data_dict = {}
dss.Vsources.First()
data_dict['source_bus'] = dss.CktElement.BusNames()[0].split(".")[0]
data_dict["feeder_head_name"] = dss.Circuit.Name()
dss.Circuit.SetActiveBus(data_dict['source_bus'])
data_dict["feeder_head_basekv"] = dss.Bus.kVBase()
data_dict["source_num_nodes"] = dss.Bus.NumNodes()
data_dict["total_num_buses_in_circuit"] = len(dss.Circuit.AllBusNames())
if data_dict["source_num_nodes"] > 1:
data_dict["feeder_head_basekv"] = round(data_dict["feeder_head_basekv"] * math.sqrt(3), 1)
data_dict["substation_xfmr"] = None
all_xfmr_df = get_thermal_equipment_info(compute_loading=False, equipment_type="transformer")
all_xfmr_df["substation_xfmr_flag"] = all_xfmr_df.apply(lambda x: int(
data_dict["source_bus"].lower() in x['bus_names_only']), axis=1)
if len(all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] == True]) > 0:
data_dict["substation_xfmr"] = all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] ==
True].to_dict(orient='records')[0]
data_dict["substation_xfmr"]["kVs"] = ast.literal_eval(data_dict["substation_xfmr"]["kVs"])
# this checks if the voltage kVs are the same for the substation transformer
data_dict["substation_xfmr"]["is_autotransformer_flag"] = len(set(data_dict["substation_xfmr"]["kVs"])) <= 1
return data_dict
def create_opendss_definition(config_definition_dict, action_type="New", property_list=None):
"""This function creates an opendss element definition for any generic equipment
Returns
-------
str
"""
command_string = f"{action_type} {config_definition_dict['equipment_type']}.{config_definition_dict['name']}"
logger.debug(f"New {config_definition_dict['equipment_type']}.{config_definition_dict['name']} being defined")
# these properties contain data (refer OpenDSS manual for more information on these parameters)
if property_list is None:
property_list = list(set(config_definition_dict.keys()) - {"name", "equipment_type"})
empty_field_values = ["----", "nan", "NaN", "None", None, np.nan]
for property_name in property_list:
if isinstance(config_definition_dict[property_name], float):
if np.isnan(config_definition_dict[property_name]):
continue
if config_definition_dict[property_name] in empty_field_values:
continue
# if the value is not empty and is not nan, only then add it into the command string
temp_s = f" {property_name}={config_definition_dict[property_name]}"
command_string = command_string + temp_s
return command_string
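# Illustrative call (made-up linecode entry); property_list is passed explicitly so the
# property order in the generated command string is deterministic.
def _example_create_opendss_definition():
    entry = {"equipment_type": "linecode", "name": "example_1ph", "normamps": 400, "nphases": 1}
    command = create_opendss_definition(entry, property_list=["normamps", "nphases"])
    assert command == "New linecode.example_1ph normamps=400 nphases=1"
    return command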
def ensure_line_config_exists(chosen_option, new_config_type, external_upgrades_technical_catalog):
"""This function check if a line config exists in the network.
If it doesn't exist, it checks the external catalog (if available) and returns a new dss definition string.
Returns
-------
str
"""
existing_config_dict = {"linecode": get_line_code(), "geometry": get_line_geometry()}
new_config_name = chosen_option[new_config_type].lower()
# if linecode or linegeometry is not present in existing network definitions
if not existing_config_dict[new_config_type]["name"].str.lower().isin([new_config_name]).any():
# add definition for linecode or linegeometry
if external_upgrades_technical_catalog is None:
raise UpgradesExternalCatalogRequired(f"External upgrades technical catalog not available to determine line config type")
external_config_df = pd.DataFrame(external_upgrades_technical_catalog[new_config_type])
if external_config_df["name"].str.lower().isin([new_config_name]).any():
config_definition_df = external_config_df.loc[external_config_df["name"] == new_config_name]
config_definition_dict = dict(config_definition_df.iloc[0])
if config_definition_dict["normamps"] != chosen_option["normamps"]:
logger.warning(f"Mismatch between noramps for linecode {new_config_name} and chosen upgrade option normamps: {chosen_option['name']}")
# check format of certain fields
matrix_fields = [s for s in config_definition_dict.keys() if 'matrix' in s]
for field in matrix_fields:
config_definition_dict[field] = config_definition_dict[field].replace("'","")
config_definition_dict[field] = config_definition_dict[field].replace("[","(")
config_definition_dict[field] = config_definition_dict[field].replace("]",")")
command_string = create_opendss_definition(config_definition_dict=config_definition_dict)
else:
raise UpgradesExternalCatalogMissingObjectDefinition(
f"{new_config_type} definition for {new_config_name} not found in external catalog."
)
else:
command_string = None
return command_string
def get_present_loading_condition():
""" Get present loading condition for all loads
Returns
-------
DataFrame
"""
load_dict = {}
dss.Circuit.SetActiveClass("Load")
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
load_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next Load...
flag = dss.ActiveClass.Next()
load_df = pd.DataFrame.from_dict(load_dict, "index")
return load_df
def get_present_storage_condition():
""" Get present operating condition for all storage
Returns
-------
DataFrame
"""
storage_dict = {}
dss.Circuit.SetActiveClass('Storage')
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
storage_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next ...
flag = dss.ActiveClass.Next()
storage_df = pd.DataFrame.from_dict(storage_dict, "index")
return storage_df
def get_present_pvgeneration():
""" Get present generation for all pv systems
Returns
-------
DataFrame
"""
pv_dict = {}
dss.Circuit.SetActiveClass("PVSystem")
flag = dss.ActiveClass.First()
while flag:
pv_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kvar': float(dss.Properties.Value("kvar")),
'Irradiance': float(dss.Properties.Value("Irradiance")),
'connection': dss.Properties.Value("conn"),
'Pmpp': float(dss.Properties.Value("Pmpp")),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Voltages': dss.CktElement.Voltages(),
'VoltagesMagAng': dss.CktElement.VoltagesMagAng(),
'VoltagesMag': float(dss.CktElement.VoltagesMagAng()[0]),
}
flag = dss.ActiveClass.Next() > 0
pv_df = pd.DataFrame.from_dict(pv_dict, "index")
return pv_df
def get_all_transformer_info_instance(upper_limit=None, compute_loading=True):
"""This collects transformer information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("transformer")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df[["wdg", "phases"]] = all_df[["wdg", "phases"]].astype(int)
float_fields = ["kV", "kVA", "normhkVA", "emerghkVA", "%loadloss", "%noloadloss", "XHL", "XHT", "XLT", "%R",
"Rneut", "Xneut", "X12", "X13", "X23", "RdcOhms"]
all_df[float_fields] = all_df[float_fields].astype(float)
# define empty new columns
all_df['bus_names_only'] = None
all_df["amp_limit_per_phase"] = np.nan
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
# convert type from list to tuple since they are hashable objects (and can be indexed)
all_df.at[index, "kVs"] = [float(a) for a in row["kVs"]]
all_df.at[index, "kVAs"] = [float(a) for a in row["kVAs"]]
all_df.at[index, "Xscarray"] = [float(a) for a in row["Xscarray"]]
all_df.at[index, "%Rs"] = [float(a) for a in row["%Rs"]]
all_df.at[index, "bus_names_only"] = [a.split(".")[0].lower() for a in row["buses"]]
# first winding is considered primary winding
primary_kv = float(row["kVs"][0])
primary_kva = float(row["kVAs"][0])
if row["phases"] > 1:
amp_limit_per_phase = primary_kva / (primary_kv * math.sqrt(3))
elif row["phases"] == 1:
amp_limit_per_phase = primary_kva / primary_kv
else:
raise InvalidOpenDssElementError(f"Incorrect number of phases for transformer {row['name']}")
all_df.at[index, "amp_limit_per_phase"] = amp_limit_per_phase
if compute_loading:
if upper_limit is None:
raise Exception("Transformer upper limit is to be passed to function to compute transformer loading")
dss.Circuit.SetActiveElement("Transformer.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]] # extract elements based on num of ph
xfmr_current_magnitude = extract_magang[::2]
max_amp_loading = max(xfmr_current_magnitude)
max_per_unit_loading = round(max_amp_loading / amp_limit_per_phase, 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
# convert lists to string type (so they can be set as dataframe index later)
all_df[['conns', 'kVs']] = all_df[['conns', 'kVs']].astype(str)
all_df = all_df.reset_index(drop=True).set_index('name')
return all_df.reset_index()
def add_info_line_definition_type(all_df):
all_df["line_definition_type"] = "line_definition"
all_df.loc[all_df["linecode"] != "", "line_definition_type"] = "linecode"
all_df.loc[all_df["geometry"] != "", "line_definition_type"] = "geometry"
return all_df
def determine_line_placement(line_series):
""" Distinguish between overhead and underground cables
currently there is no way to distinguish directly using opendssdirect/pydss etc.
It is done here using property 'height' parameter and if string present in name
Parameters
----------
line_series
Returns
-------
dict
"""
info_dict = {}
info_dict["line_placement"] = None
if line_series["line_definition_type"] == "geometry":
dss.Circuit.SetActiveClass("linegeometry")
dss.ActiveClass.Name(line_series["geometry"])
h = float(dss.Properties.Value("h"))
info_dict["h"] = 0
if h >= 0:
info_dict["line_placement"] = "overhead"
else:
info_dict["line_placement"] = "underground"
else:
if ("oh" in line_series["geometry"].lower()) or ("oh" in line_series["linecode"].lower()):
info_dict["line_placement"] = "overhead"
elif ("ug" in line_series["geometry"].lower()) or ("ug" in line_series["linecode"].lower()):
info_dict["line_placement"] = "underground"
else:
info_dict["line_placement"] = None
return info_dict
def get_all_line_info_instance(upper_limit=None, compute_loading=True, ignore_switch=True):
"""This collects line information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("line")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df["phases"] = all_df["phases"].astype(int)
all_df[["normamps", "length"]] = all_df[["normamps", "length"]].astype(float)
all_df = add_info_line_definition_type(all_df)
# define empty new columns
all_df["kV"] = np.nan
all_df["h"] = np.nan
all_df["line_placement"] = ""
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
dss.Circuit.SetActiveBus(row["bus1"])
kv_b1 = dss.Bus.kVBase()
dss.Circuit.SetActiveBus(row["bus2"])
kv_b2 = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
if round(kv_b1) != round(kv_b2):
raise InvalidOpenDssElementError("To and from bus voltages ({} {}) do not match for line {}".format(
kv_b2, kv_b1, row['name']))
all_df.at[index, "kV"] = kv_b1
# Distinguish between overhead and underground cables
# currently there is no way to distinguish directly using opendssdirect/pydss etc.
# It is done here using property 'height' parameter and if string present in name
placement_dict = determine_line_placement(row)
for key in placement_dict.keys():
all_df.at[index, key] = placement_dict[key]
# if line loading is to be computed
if compute_loading:
if upper_limit is None:
raise Exception("Line upper limit is to be passed to function to compute line loading")
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]]
line_current = extract_magang[::2]
max_amp_loading = max(line_current)
max_per_unit_loading = round(max_amp_loading / row["normamps"], 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
all_df = all_df.reset_index(drop=True).set_index('name')
all_df["kV"] = all_df["kV"].round(5)
# add units to switch length (needed to plot graph). By default, length of switch is taken as max
all_df.loc[(all_df.units == 'none') & (all_df.Switch == True), 'units'] = 'm'
# if switch is to be ignored
if ignore_switch:
all_df = all_df.loc[all_df['Switch'] == False]
return all_df.reset_index()
def compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max"):
"""This function compares all dataframes in a given dictionary based on a deciding column name
Returns
-------
Dataframe
"""
summary_df = pd.DataFrame()
for df_name in comparison_dict.keys():
summary_df[df_name] = comparison_dict[df_name][deciding_column_name]
if comparison_type == "max":
label_df = summary_df.idxmax(axis=1) # find dataframe name that has max
elif comparison_type == "min":
label_df = summary_df.idxmin(axis=1) # find dataframe name that has min
else:
raise Exception(f"Unknown comparison type {comparison_type} passed.")
final_list = []
for index, label in label_df.iteritems(): # index is element name
temp_dict = dict(comparison_dict[label].loc[index])
temp_dict.update({"name": index})
final_list.append(temp_dict)
final_df = pd.DataFrame(final_list)
return final_df
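# Small worked example (made-up loading results): for each element the row is taken from
# whichever scenario produced the largest per-unit loading.
def _example_compare_multiple_dataframes():
    scenario_a = pd.DataFrame({"max_per_unit_loading": [0.9, 1.2]}, index=["line1", "line2"])
    scenario_b = pd.DataFrame({"max_per_unit_loading": [1.1, 0.8]}, index=["line1", "line2"])
    worst = compare_multiple_dataframes(
        {"scenario_a": scenario_a, "scenario_b": scenario_b},
        deciding_column_name="max_per_unit_loading", comparison_type="max")
    # line1 keeps 1.1 (from scenario_b); line2 keeps 1.2 (from scenario_a)
    return worst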
@track_timing(timer_stats_collector)
def get_thermal_equipment_info(compute_loading, equipment_type, upper_limit=None, ignore_switch=False, **kwargs):
"""This function determines the thermal equipment loading (line, transformer), based on timepoint multiplier
Returns
-------
DataFrame
"""
timepoint_multipliers = kwargs.get("timepoint_multipliers", None)
multiplier_type = kwargs.get("multiplier_type", LoadMultiplierType.ORIGINAL)
# if there are no multipliers, run on rated load i.e. multiplier=1.0
# if compute_loading is false, then just run once (no need to check multipliers)
if (timepoint_multipliers is None) or (not compute_loading) or (multiplier_type == LoadMultiplierType.ORIGINAL):
if compute_loading and multiplier_type != LoadMultiplierType.ORIGINAL:
apply_uniform_timepoint_multipliers(multiplier_name=1, field="with_pv", **kwargs)
if equipment_type == "line":
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type == "transformer":
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
return loading_df
if multiplier_type == LoadMultiplierType.UNIFORM:
comparison_dict = {}
for pv_field in timepoint_multipliers["load_multipliers"].keys():
logger.debug(pv_field)
for multiplier_name in timepoint_multipliers["load_multipliers"][pv_field]:
logger.debug("Multipler name: %s", multiplier_name)
# this changes the dss network load and pv
apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)
if equipment_type.lower() == "line":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type.lower() == "transformer":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
loading_df.set_index("name", inplace=True)
comparison_dict[pv_field+"_"+str(multiplier_name)] = loading_df
# compare all dataframe, and create one that contains all worst loading conditions (across all multiplier conditions)
loading_df = compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max")
else:
raise Exception(f"Undefined multiplier_type {multiplier_type} passed.")
return loading_df
def get_regcontrol_info(correct_PT_ratio=False, nominal_voltage=None):
"""This collects enabled regulator control information.
If correcting PT ratio, the following information is followed (based on OpenDSS documentation)
PT ratio: if the winding is Wye, the line-to-neutral voltage is used; else, the line-to-line voltage is used.
(Here, bus kV is taken from Bus.kVBase.)
Bus base kV: returns L-L voltage for 2- and 3-phase buses; for 1-phase, returns L-N voltage.
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("regcontrol")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ['winding', 'vreg', 'band', 'ptratio', 'delay']
all_df[float_columns] = all_df[float_columns].astype(float)
all_df['at_substation_xfmr_flag'] = False # by default, reg control is considered to be not at substation xfmr
ckt_info_dict = get_circuit_info()
sub_xfmr_present = False
sub_xfmr_name = None
if ckt_info_dict['substation_xfmr'] is not None:
sub_xfmr_present = True
sub_xfmr_name = ckt_info_dict['substation_xfmr']['name']
if correct_PT_ratio:
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct regcontrol PT ratio.")
all_df['old_ptratio'] = all_df['ptratio']
for index, row in all_df.iterrows():
dss.Circuit.SetActiveElement("Regcontrol.{}".format(row["name"]))
reg_bus = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "reg_bus"] = reg_bus
dss.Circuit.SetActiveBus(reg_bus)
all_df.at[index, "bus_num_phases"] = dss.CktElement.NumPhases()
all_df.at[index, "bus_kv"] = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Transformer.{}".format(row["transformer"]))
all_df.at[index, "transformer_kva"] = float(dss.Properties.Value("kva"))
dss.Transformers.Wdg(1) # setting winding to 1, to get kV for winding 1
all_df.at[index, "transformer_kv"] = dss.Transformers.kV()
all_df.at[index, "transformer_conn"] = dss.Properties.Value("conn").replace(" ", "") # opendss returns conn with a space
all_df.at[index, "transformer_bus1"] = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "transformer_bus2"] = dss.CktElement.BusNames()[1].split(".")[0]
if correct_PT_ratio:
if (all_df.loc[index]["bus_num_phases"] > 1) and (all_df.loc[index]["transformer_conn"].lower() == "wye"):
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000 / math.sqrt(3)
else:
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000
# kV_to_be_used = dss.Bus.kVBase() * 1000
all_df.at[index, "ptratio"] = kV_to_be_used / nominal_voltage
if sub_xfmr_present and (row["transformer"] == sub_xfmr_name): # if reg control is at substation xfmr
all_df.at[index, 'at_substation_xfmr_flag'] = True
all_df = all_df.reset_index(drop=True).set_index('name')
all_df = all_df.loc[all_df['enabled'] == True]
return all_df.reset_index()
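# Worked example of the PT-ratio correction above (hypothetical values): a wye-connected
# winding rated 12.47 kV line-to-line monitors 12470 / sqrt(3) ~= 7199.6 V line-to-neutral,
# so with nominal_voltage = 120 the corrected ptratio is 7199.6 / 120 ~= 60.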
def get_capacitor_info(nominal_voltage=None, correct_PT_ratio=False):
"""
This collects capacitor information.
For correcting PT ratio, the following information and definitions are followed:
# cap banks are 3 phase, 2 phase or 1 phase. 1 phase caps will have LN voltage
# PT ratio: Ratio of the PT that converts the monitored voltage to the control voltage.
# If the capacitor is Wye, the 1st phase line-to-neutral voltage is monitored.
# Else, the line-to-line voltage (1st - 2nd phase) is monitored.
# Capacitor kv: Rated kV of the capacitor (not necessarily same as bus rating).
# For Phases=2 or Phases=3, it is line-to-line (phase-to-phase) rated voltage.
# For all other numbers of phases, it is actual rating. (For Delta connection this is always line-to-line rated voltage).
This function doesn't currently check if the object is "enabled".
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capacitor")
if len(all_df) == 0:
return pd.DataFrame()
all_df["capacitor_name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["phases", "kv"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("capacitor_name")
# collect capcontrol information to combine with capcontrols
capcontrol_df = get_cap_control_info()
capcontrol_df.rename(columns={'name': 'capcontrol_name', 'capacitor': 'capacitor_name', 'type': 'capcontrol_type',
'equipment_type': 'capcontrol_present'}, inplace=True)
capcontrol_df = capcontrol_df.set_index("capacitor_name")
# with capacitor name as index, concatenate capacitor information with cap controls
# TODO are any other checks needed before concatenating dataframes? i.e. if capacitor is not present
all_df = pd.concat([all_df, capcontrol_df], axis=1)
all_df.index.name = 'capacitor_name'
all_df = all_df.reset_index().set_index('capacitor_name')
if correct_PT_ratio and (len(capcontrol_df) > 0):
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct capacitor bank PT ratio.")
all_df['old_PTratio'] = all_df['PTratio']
# iterate over all capacitors
for index, row in all_df.iterrows():
all_df.at[index, "kvar"] = [float(a) for a in row["kvar"]][0]
# if capcontrol type is empty, then that capacitor does not have controls
# correct PT ratios for existing cap controls
if correct_PT_ratio and (len(capcontrol_df) > 0):
if row["phases"] > 1 and row["conn"].lower() == "wye":
kv_to_be_used = (row['kv'] * 1000) / math.sqrt(3)
else:
kv_to_be_used = row['kv'] * 1000
all_df.at[index, "PTratio"] = kv_to_be_used / nominal_voltage
return all_df.reset_index()
def get_cap_control_info():
"""This collects capacitor control information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capcontrol")
if len(all_df) == 0:
capcontrol_columns = ['name', 'capacitor', 'type', 'equipment_type']
return pd.DataFrame(columns=capcontrol_columns)
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["CTPhase", "CTratio", "DeadTime", "Delay", "DelayOFF", "OFFsetting", "ONsetting", "PTratio",
"Vmax", "Vmin"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("name")
return all_df.reset_index()
def get_line_geometry():
"""This collects all line geometry information
Returns
-------
DataFrame
"""
active_class_name = 'linegeometry'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_line_code():
"""This collects all line codes information
Returns
-------
DataFrame
"""
active_class_name = 'linecode'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_wire_data():
"""This collects all wire data information
Returns
-------
DataFrame
"""
active_class_name = 'wiredata'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_cn_data():
"""This collects all cn data information
Returns
-------
DataFrame
"""
active_class_name = 'cndata'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def check_dss_run_command(command_string):
"""Runs dss command
And checks for exception
Parameters
----------
command_string : str
dss command to be run
Raises
-------
OpenDssCompileError
Raised if the command fails
"""
logger.debug(f"Running DSS command: {command_string}")
result = dss.run_command(f"{command_string}")
if result != "":
raise OpenDssCompileError(f"OpenDSS run_command failed with message: {result}. \nCommand: {command_string}")
@track_timing(timer_stats_collector)
def get_bus_voltages(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):
"""This function determines the voltages, based on timepoint multiplier
Returns
-------
DataFrame
"""
timepoint_multipliers = kwargs.get("timepoint_multipliers", None)
multiplier_type = kwargs.get("multiplier_type", LoadMultiplierType.ORIGINAL)
# if there are no multipliers, run on rated load i.e. multiplier=1.0
# if compute_loading is false, then just run once (no need to check multipliers)
if (timepoint_multipliers is None) or (multiplier_type == LoadMultiplierType.ORIGINAL):
if multiplier_type != LoadMultiplierType.ORIGINAL:
apply_uniform_timepoint_multipliers(multiplier_name=1, field="with_pv", **kwargs)
# determine voltage violations after changes
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception,
**kwargs)
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
if multiplier_type == LoadMultiplierType.UNIFORM:
comparison_dict = {}
for pv_field in timepoint_multipliers["load_multipliers"].keys():
logger.debug(pv_field)
for multiplier_name in timepoint_multipliers["load_multipliers"][pv_field]:
logger.debug("Multipler name: %s", multiplier_name)
# this changes the dss network load and pv
apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception, **kwargs)
bus_voltages_df.set_index("name", inplace=True)
comparison_dict[pv_field+"_"+str(multiplier_name)] = bus_voltages_df
# compare all dataframe, and create one that contains all worst loading conditions (across all multiplier conditions)
deciding_column_dict = {"Max per unit voltage": "max", "Min per unit voltage": "min"}
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = compare_multiple_dataframes_voltage(comparison_dict=comparison_dict,
deciding_column_dict=deciding_column_dict,
voltage_upper_limit=voltage_upper_limit,
voltage_lower_limit=voltage_lower_limit)
else:
raise Exception(f"Undefined multiplier_type {multiplier_type} passed.")
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
@track_timing(timer_stats_collector)
def get_bus_voltages_instance(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):
"""This computes per unit voltages for all buses in network
Returns
-------
DataFrame
"""
circuit_solve_and_check(raise_exception=raise_exception, **kwargs) # this is added as a final check for convergence
all_dict = {}
all_bus_names = dss.Circuit.AllBusNames()
for bus_name in all_bus_names:
dss.Circuit.SetActiveBus(bus_name)
data_dict = {
"name": bus_name,
"voltages": dss.Bus.puVmagAngle()[::2],
# "kvbase": dss.Bus.kVBase(),
}
data_dict["Max per unit voltage"] = max(data_dict["voltages"])
data_dict["Min per unit voltage"] = min(data_dict["voltages"])
data_dict['Phase imbalance'] = data_dict["Max per unit voltage"] - data_dict["Min per unit voltage"]
# check for overvoltage violation
if data_dict["Max per unit voltage"] > voltage_upper_limit:
data_dict['Overvoltage violation'] = True
data_dict["Max voltage_deviation"] = data_dict["Max per unit voltage"] - voltage_upper_limit
else:
data_dict['Overvoltage violation'] = False
data_dict["Max voltage_deviation"] = 0.0
# check for undervoltage violation
if data_dict["Min per unit voltage"] < voltage_lower_limit:
data_dict['Undervoltage violation'] = True
data_dict["Min voltage_deviation"] = voltage_lower_limit - data_dict["Min per unit voltage"]
else:
data_dict['Undervoltage violation'] = False
data_dict["Min voltage_deviation"] = 0.0
all_dict[data_dict["name"]] = data_dict
all_df = pd.DataFrame.from_dict(all_dict, orient='index').reset_index(drop=True)
undervoltage_bus_list = list(all_df.loc[all_df['Undervoltage violation'] == True]['name'].unique())
overvoltage_bus_list = list(all_df.loc[all_df['Overvoltage violation'] == True]['name'].unique())
buses_with_violations = list(set(undervoltage_bus_list + overvoltage_bus_list))
return all_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
def compare_multiple_dataframes_voltage(comparison_dict, deciding_column_dict, voltage_upper_limit, voltage_lower_limit):
"""This function compares all dataframes in a given dictionary based on a deciding column
Returns
-------
Dataframe
"""
all_df = pd.DataFrame()
for deciding_column_name in deciding_column_dict.keys():
summary_df = pd.DataFrame()
comparison_type = deciding_column_dict[deciding_column_name]
for df_name in comparison_dict.keys():
label_df = pd.DataFrame()
summary_df[df_name] = comparison_dict[df_name][deciding_column_name]
if comparison_type == "max":
label_df[deciding_column_name] = summary_df.idxmax(axis=1) # find dataframe name that has max
elif comparison_type == "min":
label_df[deciding_column_name] = summary_df.idxmin(axis=1) # find dataframe name that has min
else:
raise Exception(f"Unknown comparison type {comparison_type} passed.")
final_list = []
for index, row in label_df.iterrows(): # index is element name
label = row[deciding_column_name]
temp_dict = {deciding_column_name: comparison_dict[label].loc[index][deciding_column_name]}
temp_dict.update({"name": index})
final_list.append(temp_dict)
temp_df = | pd.DataFrame(final_list) | pandas.DataFrame |
# from folders import dir_isomap_biclasse
from folders import dir_pca_biclasse, output_dir
from parameters import order, alphas
from statistics import Statistics
import pandas as pd
def main():
diag = Statistics()
#df = pd.read_csv('./../output_dir/results_multiclass_PCA.csv')
#diag.calcula_media_folds_multiclass(df)
#diag.separa_delaunay_biclass('./../output_dir/resultado_media_multiclass_PCA.csv')
# Remove other DTOs from the result file
GEOMETRY = '_delaunay_area_9'
df_best_dto = | pd.read_csv('./../output_dir/resultado_media_multiclass_PCA.csv') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
import h5py
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', '<NAME>', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', 'Don Pedro', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
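#Compare distributions of Delta pumping and outflow across three data sources (observations,
#historical-period model validation, and the extended simulation), and summarize water-year-type
#frequencies. Layout: three distribution panels (ax1-ax3) plus one stacked-bar panel (ax4).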
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
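#For each plotted variable, build a dict of the scenario, validation, and observed series,
#resampled to the aggregation period in agg_list and scaled by the corresponding unit multiplier.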
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
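#Plot either an exceedance curve (fraction of periods above each threshold) or a filled
#kernel-density estimate of the aggregated values, one curve per data source.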
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
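#Classify each validated/simulated year by water-year type using the end-of-September value of the
#forecasted SRI (Sacramento River Index) and the cutoffs defined below (Critical through Wet).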
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
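#Stack the water-year-type frequencies as bars for the validation period and the extended simulation.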
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
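#Aggregate daily model output into monthly (or annual) water-balance components for each district
#and groundwater bank, then map them to output column names via the name_bridge dictionary
#(several model entities can share one output column and are summed together).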
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
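#Initialize monthly accounting arrays (number_years * 12 months, starting in October of the first
#water year) for every bank and district.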
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] #days per calendar month, Jan-Dec (non-leap years)
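#The model output columns read below appear to be cumulative within each water year, so monthly
#totals are computed as the month-end value minus the prior month-end value; October (the first
#month of the water year) is taken directly. Array indexing assumes a water year beginning in October.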
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute inleiu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute inleiu deliveries for irrigation to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, attribute to the district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping (daily values aggregated to monthly totals)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
inleiu_recharge_name = private + '_' + district.key + '_inleiu_recharge'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
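#Note: the private-entity output below uses a slightly different year loop and offset
#(starting_year + 1) than the district-level loop above.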
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#attribute exchanged groundwater to the district's recovery purchases
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute inleiu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute inleiu deliveries for irrigation to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping for inleiu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, attribute to the district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = private + '_' + district.key + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = private + '_' + district.key + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries (minus deliveries made for recharge, accounted for above)
for contract in self.contract_list:
delivery_name = private + '_' + district.key + '_' + contract.name + '_delivery'
recharge_contract_name = private + '_' + district.key + '_' + contract.name + '_recharged'
flood_irr_name = private + '_' + district.key + '_' + contract.name + '_flood_irrigation'
flood_name = private + '_' + district.key + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping (daily values aggregated to monthly totals)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
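#Estimate bank recovery pumping from day-over-day declines in each partner's banked account
#balance, accumulated into monthly totals.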
for bank_name in self.bank_list:
for partner_name in bank_name.participant_list:
account_name = bank_name.name + '_' + partner_name
if account_name in self.values:
annual_pumping = 0.0
yesterday_account = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[bank_name.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
today_account = self.values.loc[self.index[x], account_name]
annual_pumping += max(yesterday_account - today_account, 0.0)
yesterday_account = today_account * 1.0
self.total_pumping[bank_name.name][-1] += annual_pumping
if location_type == 'monthly':
district_irrigation = pd.DataFrame(index = date_list_labels)
district_recharge = pd.DataFrame(index = date_list_labels)
district_pumping = pd.DataFrame(index = date_list_labels)
district_recharge_sales = pd.DataFrame(index = date_list_labels)
district_recharge_purchases = pd.DataFrame(index = date_list_labels)
district_recovery_sales = pd.DataFrame(index = date_list_labels)
district_recovery_purchases = pd.DataFrame(index = date_list_labels)
district_flood_purchases = pd.DataFrame(index = date_list_labels)
district_recovery_rebate = pd.DataFrame(index = date_list_labels)
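#Map each model entity to its output column; entities sharing a column (e.g. the multiple banking
#projects grouped under one code in name_bridge) are summed together.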
for y in name_bridge:
file_col_name = name_bridge[y]
if file_col_name in district_irrigation:
district_irrigation[file_col_name] += self.total_irrigation[y]
district_recharge[file_col_name] += self.total_recharge[y]
district_pumping[file_col_name] += self.total_pumping[y]
district_recharge_sales[file_col_name] += self.total_recharge_sales[y]
district_recharge_purchases[file_col_name] += self.total_recharge_purchases[y]
district_recovery_sales[file_col_name] += self.total_recovery_sales[y]
district_recovery_purchases[file_col_name] += self.total_recovery_purchases[y]
district_flood_purchases[file_col_name] += self.total_flood_purchases[y]
district_recovery_rebate[file_col_name] += self.total_recovery_rebate[y]
else:
district_irrigation[file_col_name] = self.total_irrigation[y]
district_recharge[file_col_name] = self.total_recharge[y]
district_pumping[file_col_name] = self.total_pumping[y]
district_recharge_sales[file_col_name] = self.total_recharge_sales[y]
district_recharge_purchases[file_col_name] = self.total_recharge_purchases[y]
district_recovery_sales[file_col_name] = self.total_recovery_sales[y]
district_recovery_purchases[file_col_name] = self.total_recovery_purchases[y]
district_flood_purchases[file_col_name] = self.total_flood_purchases[y]
district_recovery_rebate[file_col_name] = self.total_recovery_rebate[y]
elif location_type == 'annual':
district_irrigation = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recharge = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_pumping = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recharge_sales = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recharge_purchases = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recovery_sales = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recovery_purchases = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_flood_purchases = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
district_recovery_rebate = pd.DataFrame(index = np.arange(self.starting_year + 1,self.starting_year + self.number_years + 1))
self.total_irrigation_annual = {}
self.total_recharge_annual = {}
self.total_pumping_annual = {}
self.total_recharge_sales_annual = {}
self.total_recharge_purchases_annual = {}
self.total_recovery_sales_annual = {}
self.total_recovery_purchases_annual = {}
self.total_flood_purchases_annual = {}
self.total_recovery_rebate_annual = {}
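#Sum the monthly accounting arrays into water-year totals before mapping them to output columns.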
for y in name_bridge:
self.total_irrigation_annual[y] = np.zeros(self.number_years)
self.total_recharge_annual[y] = np.zeros(self.number_years)
self.total_pumping_annual[y] = np.zeros(self.number_years)
self.total_recharge_sales_annual[y] = np.zeros(self.number_years)
self.total_recharge_purchases_annual[y] = np.zeros(self.number_years)
self.total_recovery_sales_annual[y] = np.zeros(self.number_years)
self.total_recovery_purchases_annual[y] = np.zeros(self.number_years)
self.total_flood_purchases_annual[y] = np.zeros(self.number_years)
self.total_recovery_rebate_annual[y] = np.zeros(self.number_years)
for xx in range(0, self.number_years):
self.total_irrigation_annual[y][xx] = np.sum(self.total_irrigation[y][(xx*12):(xx*12 + 12)])
self.total_recharge_annual[y][xx] = np.sum(self.total_recharge[y][(xx*12):(xx*12 + 12)])
self.total_pumping_annual[y][xx] = np.sum(self.total_pumping[y][(xx*12):(xx*12 + 12)])
self.total_recharge_sales_annual[y][xx] = np.sum(self.total_recharge_sales[y][(xx*12):(xx*12 + 12)])
self.total_recharge_purchases_annual[y][xx] = np.sum(self.total_recharge_purchases[y][(xx*12):(xx*12 + 12)])
self.total_recovery_sales_annual[y][xx] = np.sum(self.total_recovery_sales[y][(xx*12):(xx*12 + 12)])
self.total_recovery_purchases_annual[y][xx] = np.sum(self.total_recovery_purchases[y][(xx*12):(xx*12 + 12)])
self.total_flood_purchases_annual[y][xx] = np.sum(self.total_flood_purchases[y][(xx*12):(xx*12 + 12)])
self.total_recovery_rebate_annual[y][xx] = np.sum(self.total_recovery_rebate[y][(xx*12):(xx*12 + 12)])
file_col_name = name_bridge[y]
if file_col_name in district_irrigation:
district_irrigation[file_col_name] += self.total_irrigation_annual[y]
district_recharge[file_col_name] += self.total_recharge_annual[y]
district_pumping[file_col_name] += self.total_pumping_annual[y]
district_recharge_sales[file_col_name] += self.total_recharge_sales_annual[y]
district_recharge_purchases[file_col_name] += self.total_recharge_purchases_annual[y]
district_recovery_sales[file_col_name] += self.total_recovery_sales_annual[y]
district_recovery_purchases[file_col_name] += self.total_recovery_purchases_annual[y]
district_flood_purchases[file_col_name] += self.total_flood_purchases_annual[y]
district_recovery_rebate[file_col_name] += self.total_recovery_rebate_annual[y]
else:
district_irrigation[file_col_name] = self.total_irrigation_annual[y]
district_recharge[file_col_name] = self.total_recharge_annual[y]
district_pumping[file_col_name] = self.total_pumping_annual[y]
district_recharge_sales[file_col_name] = self.total_recharge_sales_annual[y]
district_recharge_purchases[file_col_name] = self.total_recharge_purchases_annual[y]
district_recovery_sales[file_col_name] = self.total_recovery_sales_annual[y]
district_recovery_purchases[file_col_name] = self.total_recovery_purchases_annual[y]
district_flood_purchases[file_col_name] = self.total_flood_purchases_annual[y]
district_recovery_rebate[file_col_name] = self.total_recovery_rebate_annual[y]
write_file = False
if write_file:
district_irrigation.to_csv(folder_name + 'irrigation_' + plot_name + '_' + scenario_name + '.csv')
district_recharge.to_csv(folder_name + 'recharge_' + plot_name + '_' + scenario_name + '.csv')
district_pumping.to_csv(folder_name + 'pumping_' + plot_name + '_' + scenario_name + '.csv')
district_recharge_sales.to_csv(folder_name + 'recharge_sales_' + plot_name + '_' + scenario_name + '.csv')
district_recharge_purchases.to_csv(folder_name + 'recharge_purchases_' + plot_name + '_' + scenario_name + '.csv')
district_recovery_sales.to_csv(folder_name + 'recovery_sales_' + plot_name + '_' + scenario_name + '.csv')
district_recovery_purchases.to_csv(folder_name + 'recovery_purchases_' + plot_name + '_' + scenario_name + '.csv')
district_flood_purchases.to_csv(folder_name + 'flood_purchases_' + plot_name + '_' + scenario_name + '.csv')
district_recovery_rebate.to_csv(folder_name + 'recovery_rebate_' + plot_name + '_' + scenario_name + '.csv')
if location_type == 'annual':
sns.set()
district_group_list = self.figure_params[figure_name][plot_name]['district_groups']
district_groups = {}
for x in district_group_list:
district_groups[x] = self.figure_params[figure_name][plot_name][x]
n_cols = self.figure_params[figure_name][plot_name]['subplot columns']
write_file = self.figure_params[figure_name][plot_name]['write file']
figure_color_map = self.figure_params[figure_name][plot_name]['color map']
n_rows = int(np.ceil(len(district_groups)/n_cols))
colors = sns.color_palette('BrBG_r', n_colors = 7)
fig, ax = plt.subplots(n_rows, n_cols, figsize = (16,12))
counter_x = 0
counter_y = 0
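#For each district group, sort years by total surface water deliveries and plot stacked areas of
#(bottom to top): net in-district GW pumping, out-of-district GW recovery, consumptive surface
#water use, in-district recharge, and out-of-district recharge purchases.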
for x_cnt, x in enumerate(district_groups):
total_deliveries = np.zeros(self.number_years)
for y in district_groups[x]:
total_deliveries += self.total_irrigation_annual[y] + self.total_recharge_annual[y] + self.total_recharge_purchases_annual[y]
plotting_order = np.argsort(total_deliveries)
prev_list = np.zeros(self.number_years)
for y in district_groups[x]:
this_list = prev_list + self.total_pumping_annual[y] - self.total_recovery_rebate_annual[y]
ax[counter_x][counter_y].fill_between(np.arange(self.number_years), prev_list[plotting_order], this_list[plotting_order], color = colors[5], edgecolor = 'black')
prev_list = this_list * 1.0
for y in district_groups[x]:
this_list = prev_list + self.total_recovery_purchases_annual[y]
ax[counter_x][counter_y].fill_between(np.arange(self.number_years), prev_list[plotting_order], this_list[plotting_order], color = colors[4], edgecolor = 'black')
prev_list = this_list * 1.0
for y in district_groups[x]:
this_list = prev_list + self.total_irrigation_annual[y] - self.total_recovery_purchases_annual[y]
ax[counter_x][counter_y].fill_between(np.arange(self.number_years), prev_list[plotting_order], this_list[plotting_order], color = colors[0], edgecolor = 'black')
prev_list = this_list * 1.0
for y in district_groups[x]:
this_list = prev_list + self.total_recharge_annual[y]
ax[counter_x][counter_y].fill_between(np.arange(self.number_years), prev_list[plotting_order], this_list[plotting_order], color = colors[1], edgecolor = 'black')
prev_list = this_list * 1.0
for y in district_groups[x]:
this_list = prev_list + self.total_recharge_purchases_annual[y]
ax[counter_x][counter_y].fill_between(np.arange(self.number_years), prev_list[plotting_order], this_list[plotting_order], color = colors[2], edgecolor = 'black')
prev_list = this_list * 1.0
ax[counter_x][counter_y].set_title(x, fontsize = 16, weight = 'bold', fontname = 'Gill Sans MT')
ax[counter_x][counter_y].set_ylim([0, np.ceil(max(prev_list)/500.0) * 500.0])
counter_x += 1
if counter_x == n_rows:
counter_x = 0
counter_y += 1
for x in range(0, n_rows):
for y in range(0, n_cols):
ax[x][y].set_xlim([0, self.number_years])
ax[x][y].set_xticks([0, int(np.ceil(self.number_years/2)), self.number_years])
ax[x][y].set_xticklabels(['0%', '50%', '100%'])
if x == n_rows - 1:
ax[x][y].set_xlabel('Frequency of Years with Fewer Total Surface Water Deliveries', fontsize = 14, fontname = 'Gill Sans MT')
if y == 0:
ax[x][y].set_ylabel('Annual water use by type (tAF)', fontsize = 16, fontname = 'Gill Sans MT')
ax[x][y].grid(False)
for tick in ax[x][y].get_xticklabels():
tick.set_fontname('Gill Sans MT')
for tick in ax[x][y].get_yticklabels():
tick.set_fontname('Gill Sans MT')
legend_elements = []
legend_list = ['Consumptive Use', 'In-District Recharge', 'Out-of-District Recharge', 'Out-of-District GW Recovery', 'In-District GW Pumping']
color_numbers = [0, 1, 2, 4, 5]
for cnt, xx in enumerate(legend_list):
legend_elements.append(Patch(facecolor = colors[color_numbers[cnt]], edgecolor = 'black', label = xx))
ax[0][0].legend(handles = legend_elements, loc = 'lower right', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':10})
for x in range(0,2):
for y in range(0,2):
for tick in ax[x][y].get_xticklabels():
tick.set_fontname('Gill Sans MT')
for tick in ax[x][y].get_yticklabels():
tick.set_fontname('Gill Sans MT')
plt.savefig(folder_name + figure_name + '_' + plot_name + '_' + scenario_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def plot_forecasts(self, folder_name, figure_name, plot_name, n_colors, scatter_interval, range_sensitivity, show_plot):
predictor_list = self.figure_params[figure_name][plot_name]['predictor values']
forecast_list = self.figure_params[figure_name][plot_name]['forecast_values']
forecast_periods = self.figure_params[figure_name][plot_name]['forecast_periods']
non_log_list = self.figure_params[figure_name][plot_name]['non_log']
colorbar_labels = self.figure_params[figure_name][plot_name]['colorbar_label_list']
colorbar_index = self.figure_params[figure_name][plot_name]['colorbar_label_index']
all_cols = self.figure_params[figure_name][plot_name]['all_cols']
subplot_label = self.figure_params[figure_name][plot_name]['subplot_annotations']
watershed_keys = self.figure_params[figure_name][plot_name]['watershed_keys']
watershed_labels = self.figure_params[figure_name][plot_name]['watershed_labels']
#Initialize Figure
sns.set()
colors = sns.color_palette('YlGnBu_r', n_colors = n_colors)
num_cols = len(forecast_list)
num_rows = len(watershed_keys)
fig = plt.figure(figsize = (16, 10))
gs = gridspec.GridSpec(num_rows,num_cols)
#subplot counts
counter_x = 0
counter_y = 0
#Plot colorbar
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.9187, 0.15, 0.025, 0.7])
sm = plt.cm.ScalarMappable(cmap=pl.cm.YlGnBu_r, norm=plt.Normalize(vmin=0, vmax=n_colors))
clb1 = plt.colorbar(sm, cax = cbar_ax, ticks=colorbar_index)
clb1.ax.set_yticklabels(colorbar_labels)
clb1.ax.invert_yaxis()
clb1.ax.tick_params(labelsize=16)
for item in clb1.ax.yaxis.get_ticklabels():
item.set_fontname('Gill Sans MT')
##Loop through reservoirs
for key, key_label in zip(watershed_keys, watershed_labels):
#Get reservoir flow and snowpack timeseries
Q = self.observations['%s_inf' % key]
QMA = self.observations['%s_inf' % key].rolling(window=30).mean() * cfs_tafd
SNPK = self.observations['%s_snow' % key].values
scatterplot_values = pd.DataFrame(index = self.observations.index, columns = all_cols)
#record predictor and observed variables at each timestep
for x in range(0, len(Q)):
index_val = self.observations.index[x]
day_year_val = index_val.dayofyear
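        # Convert calendar day-of-year to day-of-water-year (DOWY), assuming a water year that starts 1 October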
if index_val.month > 9:
dowy = max(day_year_val - 273, 0)
else:
dowy = min(day_year_val + 92, 364)
#Predictor variables
scatterplot_values.at[index_val, 'DOWY'] = dowy
scatterplot_values.at[index_val, 'Snowpack (SWE)'] = SNPK[x]
scatterplot_values.at[index_val, 'Mean Inflow, Prior 30 Days (tAF/day)'] = QMA[x]
#Observed variables (loop through list)
for count_f, forecast_pd in enumerate(forecast_periods):
if forecast_pd == 'SNOWMELT':
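            # Sum inflow over the approximate snowmelt window (roughly April-July of the current water year)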
april_start = (181 - dowy) + x
july_end = (303 - dowy) + x
index_snowmelt = (self.observations.index > self.observations.index[april_start]) & (self.observations.index < self.observations.index[july_end])
scatterplot_values.at[index_val, forecast_list[count_f]] = np.sum(Q[index_snowmelt]) * cfs_tafd
else:
if x < len(Q) - forecast_pd:
index_f = (self.observations.index > self.observations.index[x]) & (self.observations.index < self.observations.index[x + forecast_pd - 1])
else:
index_f = (self.observations.index > self.observations.index[x]) & (self.observations.index < self.observations.index[len(Q) - 1])
scatterplot_values.at[index_val, forecast_list[count_f]] = np.sum(Q[index_f]) * cfs_tafd
##Find min/max values in each predictor/observed dataset, use to scale axis for each subplot
min_plot = {}
max_plot = {}
for aa in predictor_list:
min_plot[aa] = 999998
max_plot[aa] = -999999.9
for aa in forecast_list:
min_plot[aa] = 999998
max_plot[aa] = -999999.9
counter = 0
for dowy_loop in range(0, n_colors):
if counter == scatter_interval:
dowy_loop_values = scatterplot_values[scatterplot_values['DOWY'] == dowy_loop]
for index, row in dowy_loop_values.iterrows():
for aa, bb in zip(predictor_list, forecast_list):
if aa in non_log_list and ~ | pd.isnull(row[aa]) | pandas.isnull |
import os.path
import json
import zipfile
import numpy as np
import pandas as pd
import requests
from openpyxl import load_workbook
import ukcensusapi.Nomisweb as Api
import ukpopulation.utils as utils
class SNPPData:
"""
  Functionality for downloading and collating UK Subnational Population Projection (SNPP) data
Nomisweb stores the England data (only)
  Wales/Scotland/NI are not the responsibility of ONS and are made available online by the relevant statistical agencies
"""
def __init__(self, cache_dir=utils.default_cache_dir()):
self.cache_dir = cache_dir
self.data_api = Api.Nomisweb(self.cache_dir)
self.data = {}
self.data[utils.EN] = self.__do_england()
self.data[utils.WA] = self.__do_wales()
self.data[utils.SC] = self.__do_scotland()
self.data[utils.NI] = self.__do_nireland()
# LADs * 26 years * 91 ages * 2 genders
# assert len(self.data) == (326+22+32+11) * 26 * 91 * 2
def min_year(self, code):
"""
Returns the first year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return min(self.data[code].PROJECTED_YEAR_NAME.unique())
def max_year(self, code):
"""
Returns the final year in the projection, assumes a single LAD or country code
"""
# convert to country if necessary
if "0" in code:
code = utils.country(code)[0]
return max(self.data[code].PROJECTED_YEAR_NAME.unique())
def all_lads(self, countries):
"""
Returns all the LAD codes in the country or countries specified
Supports EN WA SC NI EW GB UK
"""
if isinstance(countries, str):
countries = [countries]
lads = []
for country in countries:
if country in self.data:
lads.extend(self.data[country].GEOGRAPHY_CODE.unique())
else:
# warn if missing or invalid
        print("WARNING: no LAD codes for country %s" % country)
return lads
def filter(self, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
# convert inputs to arrays if single values supplied (for isin)
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
if np.isscalar(ages):
ages = [ages]
if np.isscalar(genders):
genders = [genders]
    # Handle an empty list not being recognised as None, which was causing problems in utils.trim_range() below
if not years:
years = None
countries = utils.country(geog_codes)
    # TODO fix the incorrect assumption that all countries have the same year range
years = utils.trim_range(years, self.min_year(countries[0]), self.max_year(countries[0]))
retval = pd.DataFrame() # {"GEOGRAPHY_CODE": [], "PROJECTED_YEAR_NAME": [], "C_AGE": [], "GENDER":[], "OBS_VALUE": []})
# loop over datasets as needed
for country in countries:
# apply filters
retval = retval.append(self.data[country][(self.data[country].GEOGRAPHY_CODE.isin(geog_codes)) &
(self.data[country].PROJECTED_YEAR_NAME.isin(years)) &
(self.data[country].C_AGE.isin(ages)) &
(self.data[country].GENDER.isin(genders))], ignore_index=True,
sort=False)
    # check for any codes requested that weren't present (this check is far easier to do on the result)
invalid_codes = np.setdiff1d(geog_codes, retval.GEOGRAPHY_CODE.unique())
if len(invalid_codes) > 0:
raise ValueError("Filter for LAD code(s): %s for years %s returned no data (check also age/gender filters)"
% (str(invalid_codes), str(years)))
return retval
def aggregate(self, categories, geog_codes, years=None, ages=range(0, 91), genders=[1, 2]):
data = self.filter(geog_codes, years, ages, genders)
# invert categories (they're the ones to aggregate, not preserve)
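    # e.g. categories=["GENDER", "C_AGE"] sums OBS_VALUE over gender and age while grouping by the
    # remaining columns (GEOGRAPHY_CODE, PROJECTED_YEAR_NAME) - illustrative, on the assumption that
    # utils.check_and_invert returns the complement of the supplied category names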
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
  # year_range can include years that don't need to be extrapolated
# Filtering age and gender is not (currently) supported
def extrapolate(self, npp, geog_codes, year_range):
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
geog_codes = utils.split_by_country(geog_codes)
all_codes_all_years = pd.DataFrame()
for country in geog_codes:
if not geog_codes[country]: continue
max_year = self.max_year(country)
last_year = self.filter(geog_codes[country], max_year)
(in_range, ex_range) = utils.split_range(year_range, max_year)
      # years that don't need to be extrapolated
all_years = self.filter(geog_codes[country], in_range) if in_range else pd.DataFrame()
for year in ex_range:
data = last_year.copy()
scaling = npp.year_ratio("ppp", country, max_year, year)
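        # npp.year_ratio is assumed to return a frame keyed by GENDER/C_AGE whose OBS_VALUE is the
        # NPP growth ratio between max_year and the target year; it is merged in below and applied
        # as a multiplier to the final SNPP year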
data = data.merge(scaling[["GENDER", "C_AGE", "OBS_VALUE"]], on=["GENDER", "C_AGE"])
data["OBS_VALUE"] = data.OBS_VALUE_x * data.OBS_VALUE_y
data.PROJECTED_YEAR_NAME = year
all_years = all_years.append(data.drop(["OBS_VALUE_x", "OBS_VALUE_y"], axis=1), ignore_index=True,
sort=False)
all_codes_all_years = all_codes_all_years.append(all_years, ignore_index=True, sort=False)
return all_codes_all_years
def extrapolagg(self, categories, npp, geog_codes, year_range):
"""
Extrapolate and then aggregate
"""
data = self.extrapolate(npp, geog_codes, year_range)
# invert categories (they're the ones to aggregate, not preserve)
return data.groupby(utils.check_and_invert(categories))["OBS_VALUE"].sum().reset_index()
def create_variant(self, variant_name, npp, geog_codes, year_range):
"""
Apply NPP variant to SNPP: SNPP(v) = SNPP(0) * sum(a,g) [ NPP(v) / NPP(0) ]
Preserves age-gender structure of SNPP data
"""
result = pd.DataFrame()
if isinstance(geog_codes, str):
geog_codes = [geog_codes]
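    # Illustration: for a "high" NPP variant, each age/gender cell of the extrapolated SNPP is scaled
    # by the NPP(variant)/NPP(principal) ratio for the matching year (variant codes follow the NPP data)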
for geog_code in geog_codes:
(pre_range, in_range) = utils.split_range(year_range, npp.min_year() - 1)
# for any years prior to NPP we just use the SNPP data as-is (i.e. "ppp")
pre_data = self.filter(geog_code, pre_range) if pre_range else pd.DataFrame()
if len(pre_data) > 0:
print("WARNING: variant {} not applied for years {} that predate the NPP data".format(variant_name,
pre_range))
      # if there's nothing in the NPP range, keep the pre-NPP data and move on to the next code
if not in_range:
        result = result.append(pre_data)
continue
data = self.extrapolate(npp, geog_code, in_range).sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"]).reset_index(drop=True)
scaling = npp.variant_ratio(variant_name, utils.country(geog_code), year_range).reset_index().sort_values(
["C_AGE", "GENDER", "PROJECTED_YEAR_NAME"])
# scaling.to_csv(variant_name + ".csv", index=False)
print("DF: ", len(data), ":", len(scaling))
assert (len(data) == len(scaling))
data.OBS_VALUE = data.OBS_VALUE * scaling.OBS_VALUE
# prepend any pre-NPP data
result = result.append(pre_data.append(data))
return result
def __do_england(self):
# return self.__do_england_ons() # 2014
return self.__do_england_nomisweb() # 2018
# nomisweb data is now 2018-based
def __do_england_nomisweb(self):
print("Collating SNPP data for England...")
# need to do this in 2 batches as entire table has >1000000 rows
table_internal = "NM_2006_1" # SNPP
query_params = {
"gender": "1,2",
"c_age": "101...191",
"MEASURES": "20100",
"date": "latest", # 2018-based
"projected_year": "2018...2031",
"select": "geography_code,projected_year_name,gender,c_age,obs_value",
"geography": "1946157057...1946157382"
}
snpp_e = self.data_api.get_data(table_internal, query_params)
query_params["projected_year"] = "2032...2043"
snpp_e = snpp_e.append(self.data_api.get_data(table_internal, query_params))
# make age actual year
snpp_e.C_AGE = snpp_e.C_AGE - 101
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2016.csv")
# assert(len(snpp_e) == 26*2*91*326) # 326 LADs x 91 ages x 2 genders x 26 years
return snpp_e
  # Alternative method of downloading the England data from the ONS website (only works with the 2014-based data as it stands).
def __do_england_ons(self):
print("Collating SNPP data for England...")
england_src = "https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationprojections/datasets/localauthoritiesinenglandz1/2014based/snppz1population.zip"
england_raw = self.cache_dir + "/snpp_e.csv"
england_zip = self.cache_dir + "/snpp_e.zip"
if os.path.isfile(england_raw):
snpp_e = pd.read_csv(england_raw)
else:
response = requests.get(england_src)
with open(england_zip, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
print("Downloaded", england_zip)
z = zipfile.ZipFile(england_zip)
# print(z.namelist())
snpp_e = pd.DataFrame()
for gender in [1, 2]:
filename = "2014 SNPP Population " + ("males" if gender == 1 else "females") + ".csv"
chunk = pd.read_csv(z.open(filename)) \
.drop(["AREA_NAME", "COMPONENT", "SEX"], axis=1) \
.query('AGE_GROUP != "All ages"')
# .AGE_GROUP.replace({"90 and over": "90"}
chunk.AGE_GROUP = chunk.AGE_GROUP.replace({"90 and over": "90"})
chunk = chunk.melt(id_vars=["AREA_CODE", "AGE_GROUP"])
# chunk = chunk[chunk.AGE_GROUP != "all ages"]
# chunk = chunk.stack().reset_index()
chunk.columns = ["GEOGRAPHY_CODE", "C_AGE", "PROJECTED_YEAR_NAME", "OBS_VALUE"]
chunk["GENDER"] = gender
snpp_e = snpp_e.append(chunk)
# assert(len(snpp_e) == 26*2*91*326) # 326 districts x 91 ages x 2 genders x 26 years
snpp_e.to_csv(england_raw, index=False)
# snpp_e[(snpp_e.GEOGRAPHY_CODE=="E08000021") & (snpp_e.PROJECTED_YEAR_NAME==2039)].to_csv("snpp_ncle_2014.csv")
return snpp_e
# Wales
def __do_wales(self):
print("Collating SNPP data for Wales...")
cache_dir = utils.default_cache_dir()
wales_raw = cache_dir + "/snpp_w.csv"
if os.path.isfile(wales_raw):
snpp_w = | pd.read_csv(wales_raw) | pandas.read_csv |
from collections import defaultdict
import argparse
import sys
import pandas as pd
from sigtestv.database import ResultsDatabase
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--database-file', '-f', type=str, required=True)
parser.add_argument('--model-name', '-m', type=str, required=True)
parser.add_argument('--dataset-name', '-d', type=str, required=True)
parser.add_argument('--result-names', '-n', type=str, nargs='+', required=True)
parser.add_argument('--set-type', '-s', type=str, default='dev')
parser.add_argument('--option-names', '-o', type=str, nargs='+', default=[])
parser.add_argument('--option-values', '-v', type=str, nargs='+', default=[])
parser.add_argument('--extract-options', '-x', type=str, nargs='+', default=[])
args = parser.parse_args()
args.option_names = list(map(str.strip, args.option_names))
args.extract_options = list(map(str.strip, args.extract_options))
database = ResultsDatabase(args.database_file)
run_collection = database.fetch_all(args.model_name, args.dataset_name)
run_collection = run_collection.filter_by_options(dict(zip(args.option_names, args.option_values)))
result_names = set(args.result_names)
rc_results = run_collection.extract_results(result_names, args.set_type)
df_data = defaultdict(list)
for rc, results in rc_results:
df_data['model_name'].append(rc.model_name)
df_data['dataset_name'].append(rc.dataset_name)
for opt_name in args.extract_options:
df_data[opt_name].append(rc.attr(opt_name))
df_data['set_type'].append(args.set_type)
missing_names = result_names.copy()
for result in results:
df_data[result.name].append(result.value)
missing_names.remove(result.name)
for name in missing_names:
df_data[name].append(None)
| pd.DataFrame(df_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
"""Raise ValueError if `df` is empty."""
df = pd.DataFrame([], dtype="int", columns=["A"])
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
"""Raise ValueError if `right` is empty."""
s = pd.Series([], dtype="int", name="A")
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
"""Raise TypeError if `right` is not a Series/DataFrame."""
with pytest.raises(TypeError):
df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
"""Raise ValueError if `right` is not a named Series."""
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_df_MultiIndex(df):
"""Raise ValueError if `df` columns is a MultiIndex."""
with pytest.raises(ValueError):
df.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(
pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
)
@given(df=conditional_df())
def test_right_MultiIndex(df):
"""Raise ValueError if `right` columns is a MultiIndex."""
with pytest.raises(ValueError):
right = df.copy()
right.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
"""Raise ValueError if no condition is provided."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s)
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
"""Raise TypeError if any condition in conditions is not a tuple."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("A", "B", ""), ["A", "B"])
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
"""Raise ValueError if any condition is not length 3."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("A", "B", "C", "<"))
df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
"""Raise TypeError if left_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, (1, "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
"""Raise TypeError if right_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", 1, "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
"""Raise TypeError if the operator is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", 1))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
"""
Raise ValueError if `left_on`
can not be found in `df`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
"""
Raise ValueError if `right_on`
can not be found in `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
"""
Raise ValueError if `op` is not any of
`!=`, `<`, `>`, `>=`, `<=`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "=!"))
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
"""
Raise TypeError if `how` is not a string.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
"""
Raise ValueError if `how` is not one of
`inner`, `left`, or `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how="INNER")
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
"""
Raise ValueError if the dtypes are both strings
on a non-equi operator.
"""
with pytest.raises(ValueError):
df.conditional_join(right, ("C", "Strings", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
"""
Raise ValueError if dtype of column in `df`
is not an acceptable type.
"""
df["F"] = pd.Timedelta("1 days")
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
"""
Raise ValueError if dtype of column in `df`
does not match the dtype of column from `right`.
"""
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
"""
Raise ValueError if dtype is category,
and op is non-equi.
"""
with pytest.raises(ValueError):
s.name = "A"
s = s.astype("category")
df["C"] = df["C"].astype("category")
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
"""
Raise TypeError if `sort_by_appearance` is not a boolean.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["B", "Numeric"]
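    # Build the expected result by emulating a cross join (constant key t=1 on both frames) and then
    # filtering with query(); conditional_join is expected to return the same rows for the "<" condition.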
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="2"), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
"""Test output for a single condition. "<"."""
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
"""Test output for a single condition. "<=". DateTimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
"""Test output for a single condition. "<". Dates"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
"""Test output for a single condition. ">". Datetimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} >= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_floats_floats(df, right):
"""Test output for a single condition. ">"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints_extension_array(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_numeric(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_ints_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_floats_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["B", "Numeric"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_datetime(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["E", "Dates"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_string(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@pytest.mark.xfail(
reason="""sometimes, categories are coerced to objects;
might be a pandas version issue.
"""
)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_category(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
df = df.assign(C=df["C"].astype("category"))
right = right.assign(Strings=right["Strings"].astype("category"))
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_numeric(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_datetime(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["E", "Dates"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_left(df, right):
"""Test output when `how==left`. "<="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1, index=np.arange(len(df)))
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = df.join(
expected.filter(right.columns), how="left", sort=False
).reset_index(drop=True)
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="left", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_right(df, right):
"""Test output when `how==right`. ">"."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, index=np.arange(len(right))), on="t")
.query(f"{left_on} > {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = (
expected.filter(df.columns)
.join(right, how="right", sort=False)
.reset_index(drop=True)
)
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="right", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
| assert_frame_equal(expected, actual) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
########################################################################
"""
Module provides functions to prepare different datasets from EUAIMS.
"""
# Imports
import os
import json
import time
import urllib
import shutil
import pickle
import requests
import logging
import numpy as np
from collections import namedtuple
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import RobustScaler, OneHotEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from pynet.datasets import Fetchers
from neurocombat_sklearn import CombatModel as fortin_combat
from nibabel.freesurfer.mghformat import load as surface_loader
# Global parameters
Item = namedtuple("Item", ["train_input_path", "test_input_path",
"train_metadata_path", "test_metadata_path"])
COHORT_NAME = "EUAIMS"
FOLDER = "/neurospin/brainomics/2020_deepint/data"
SAVING_FOLDER = "/tmp/EUAIMS"
FILES = {
"stratification": os.path.join(FOLDER, "EUAIMS_stratification.tsv"),
"rois_mapper": os.path.join(FOLDER, "EUAIMS_rois.tsv"),
"surf_stratification": os.path.join(
FOLDER, "EUAIMS_surf_stratification.tsv")
}
DEFAULTS = {
"clinical": {
"test_size": 0.2, "seed": 42,
"return_data": False, "z_score": True,
"drop_cols": ["t1:site", "t1:ageyrs", "t1:sex", "t1:fsiq",
"t1:group", "t1:diagnosis", "mri", "t1:group:name",
"qc", "labels", "subgroups"],
"qc": {"t1:fsiq": {"gte": 70},
"mri": {"eq": 1},
"qc": {"eq": "include"}}
},
"rois": {
"test_size": 0.2, "seed": 42,
"return_data": False, "z_score": True, "adjust_sites": True,
"metrics": ["lgi:avg", "thick:avg", "surf:area"],
"roi_types": ["cortical"],
"residualize_by": {"continuous": ["t1:ageyrs", "t1:fsiq"],
"discrete": ["t1:sex"]},
"qc": {"t1:fsiq": {"gte": 70},
"mri": {"eq": 1},
"qc": {"eq": "include"}}
},
"genetic": {
"test_size": 0.2, "seed": 42,
"return_data": False, "z_score": True, "scores": None,
"qc": {"t1:fsiq": {"gte": 70},
"mri": {"eq": 1},
"qc": {"eq": "include"}}
},
"surface": {
"test_size": 0.2, "seed": 42,
"return_data": False, "z_score": True, "adjust_sites": True,
"metrics": ["pial_lgi", "thickness"],
"residualize_by": {"continuous": ["t1:ageyrs", "t1:fsiq"],
"discrete": ["t1:sex"]},
"qc": {"t1:fsiq": {"gte": 70},
"mri": {"eq": 1},
"qc": {"eq": "include"}}
},
"multiblock": {
"test_size": 0.2, "seed": 42,
"blocks": ["clinical", "surface-lh", "surface-rh", "genetic"],
"qc": {"t1:fsiq": {"gte": 70},
"mri": {"eq": 1},
"qc": {"eq": "include"}}
}
}
logger = logging.getLogger("pynet")
def apply_qc(data, prefix, qc):
""" applies quality control to the data
Parameters
----------
data: pandas DataFrame
data for which we control the quality
prefix: string
prefix of the column names
qc: dict
        quality control dict. Keys are the names of the columns
        to control on; values are dicts mapping an order relationship
        (e.g. "gte") to a threshold value
Returns
-------
data: pandas DataFrame
selected data by the quality control
"""
idx_to_keep = pd.Series([True] * len(data))
relation_mapper = {
"gt": lambda x, y: x > y,
"lt": lambda x, y: x < y,
"gte": lambda x, y: x >= y,
"lte": lambda x, y: x <= y,
"eq": lambda x, y: x == y,
}
for name, controls in qc.items():
for relation, value in controls.items():
if relation not in relation_mapper.keys():
raise ValueError("The relationship {} provided is not a \
valid one".format(relation))
elif "{}{}".format(prefix, name) in data.columns:
new_idx = relation_mapper[relation](
data["{}{}".format(prefix, name)], value)
idx_to_keep = idx_to_keep & new_idx
return data[idx_to_keep]
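# Illustrative usage of apply_qc (not part of the pipeline); column names follow the
# "bloc-clinical_score-" prefix convention used throughout this module:
#
#   _demo = pd.DataFrame({"bloc-clinical_score-t1:fsiq": [65, 80, 102],
#                         "bloc-clinical_score-qc": ["exclude", "include", "include"]})
#   apply_qc(_demo, "bloc-clinical_score-", {"t1:fsiq": {"gte": 70}, "qc": {"eq": "include"}})
#   # -> keeps only the rows with fsiq >= 70 and qc == "include"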
def fetch_clinical_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
cohort=COHORT_NAME, defaults=DEFAULTS['clinical']):
""" Fetcher wrapper for clinical data
Parameters
----------
datasetdir: string, default SAVING_FOLDER
path to the folder in which to save the data
files: dict, default FILES
contains the paths to the different files
cohort: string, default COHORT_NAME,
name of the cohort
defaults: dict, default DEFAULTS
default values for the wrapped function
Returns
-------
fetcher: function
corresponding fetcher.
"""
fetcher_name = "fetcher_clinical_{}".format(cohort)
# @Fetchers.register
def fetch_clinical(
test_size=defaults["test_size"], seed=defaults["seed"],
return_data=defaults["return_data"], z_score=defaults["z_score"],
drop_cols=defaults["drop_cols"], qc=defaults["qc"]):
""" Fetches and preprocesses clinical data
Parameters
----------
test_size: float, default 0.2
proportion of the dataset to keep for testing. Preprocessing models
will only be fitted on the training part and applied to the test
set. You can specify not to use a testing set by setting it to 0
seed: int, default 42
random seed to split the data into train / test
return_data: bool, default False
If false, saves the data in the specified folder, and return the
path. Otherwise, returns the preprocessed data and the
corresponding subjects
z_score: bool, default True
            whether or not to transform the data into z-scores, i.e.
            standardize and scale it
drop_cols: list of string, see default
names of the columns to drop before saving the data.
qc: dict, see default
            keys are the names of the features to control on, values are the
requirements on their values (see the function apply_qc)
Returns
-------
item: namedtuple
a named tuple containing 'train_input_path', 'train_metadata_path',
and 'test_input_path', 'test_metadata_path' if test_size > 0
X_train: numpy array,
Training data, if return_data is True
X_test: numpy array,
Test data, if return_data is True and test_size > 0
subj_train: numpy array,
Training subjects, if return_data is True
subj_test: numpy array,
Test subjects, if return_data is True and test_size > 0
"""
clinical_prefix = "bloc-clinical_score-"
subject_column_name = "participant_id"
path = os.path.join(datasetdir, "clinical_X_train.npy")
meta_path = os.path.join(datasetdir, "clinical_X_train.tsv")
path_test = None
meta_path_test = None
if test_size > 0:
path_test = os.path.join(datasetdir, "clinical_X_test.npy")
meta_path_test = os.path.join(datasetdir, "clinical_X_test.tsv")
if not os.path.isfile(path):
data = pd.read_csv(files["stratification"], sep="\t")
clinical_cols = [subject_column_name]
clinical_cols += [col for col in data.columns
if col.startswith(clinical_prefix)]
data = data[clinical_cols]
data_train = apply_qc(data, clinical_prefix, qc).sort_values(
subject_column_name)
data_train.columns = [elem.replace(clinical_prefix, "")
for elem in data_train.columns]
X_train = data_train.drop(columns=drop_cols)
# Splits in train and test and removes nans
X_test, subj_test = (None, None)
if test_size > 0:
X_train, X_test = train_test_split(
X_train, test_size=test_size, random_state=seed)
na_idx_test = (X_test.isna().sum(1) == 0)
X_test = X_test[na_idx_test]
subj_test = X_test[subject_column_name].values
X_test = X_test.drop(columns=[subject_column_name]).values
na_idx_train = (X_train.isna().sum(1) == 0)
X_train = X_train[na_idx_train]
subj_train = X_train[subject_column_name].values
X_train = X_train.drop(columns=[subject_column_name])
cols = X_train.columns
X_train = X_train.values
# Standardizes and scales
if z_score:
scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
_path = os.path.join(datasetdir, "clinical_scaler.pkl")
with open(_path, "wb") as f:
pickle.dump(scaler, f)
if test_size > 0:
X_test = scaler.transform(X_test)
# Return data and subjects
X_train_df = pd.DataFrame(data=X_train, columns=cols)
X_train_df.insert(0, subject_column_name, subj_train)
X_test_df = None
if test_size > 0:
X_test_df = pd.DataFrame(data=X_test, columns=cols)
X_test_df.insert(0, subject_column_name, subj_test)
# Saving
np.save(path, X_train)
X_train_df.to_csv(meta_path, index=False, sep="\t")
if test_size > 0:
np.save(path_test, X_test)
X_test_df.to_csv(meta_path_test, index=False, sep="\t")
if return_data:
X_train = np.load(path)
subj_train = pd.read_csv(meta_path, sep="\t")[
subject_column_name].values
X_test, subj_test = (None, None)
if test_size > 0:
X_test = np.load(path_test)
subj_test = pd.read_csv(meta_path_test, sep="\t")[
subject_column_name].values
return X_train, X_test, subj_train, subj_test
else:
return Item(train_input_path=path, test_input_path=path_test,
train_metadata_path=meta_path,
test_metadata_path=meta_path_test)
return fetch_clinical
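# Minimal usage sketch (assuming the files listed in FILES are available on disk):
#
#   fetch_clinical = fetch_clinical_wrapper(datasetdir="/tmp/EUAIMS")
#   item = fetch_clinical()                      # preprocesses, saves .npy/.tsv files, returns their paths
#   X_train, X_test, subj_train, subj_test = fetch_clinical(return_data=True)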
def fetch_rois_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
cohort=COHORT_NAME, site_column_name="t1:site",
defaults=DEFAULTS['rois']):
""" Fetcher wrapper for rois data
Parameters
----------
datasetdir: string, default SAVING_FOLDER
path to the folder in which to save the data
files: dict, default FILES
contains the paths to the different files
cohort: string, default COHORT_NAME,
name of the cohort
    site_column_name: string, default "t1:site"
name of the column containing the site of MRI acquisition
defaults: dict, default DEFAULTS
default values for the wrapped function
Returns
-------
fetcher: function
corresponding fetcher
"""
fetcher_name = "fetcher_rois_{}".format(cohort)
# @Fetchers.register
def fetch_rois(
metrics=defaults["metrics"], roi_types=defaults["roi_types"],
test_size=defaults["test_size"], seed=defaults["seed"],
return_data=defaults["return_data"], z_score=defaults["z_score"],
adjust_sites=defaults["adjust_sites"],
residualize_by=defaults["residualize_by"], qc=defaults["qc"]):
""" Fetches and preprocesses roi data
Parameters
----------
datasetdir: string
path to the folder in which to save the data
metrics: list of strings, see default
metrics to fetch
roi_types: list of strings, default ["cortical"]
type of rois to fetch. Must be one of "cortical", "subcortical"
and "other"
test_size: float, default 0.2
proportion of the dataset to keep for testing. Preprocessing models
will only be fitted on the training part and applied to the test
set. You can specify not to use a testing set by setting it to 0
seed: int, default 42
random seed to split the data into train / test
return_data: bool, default False
If false, saves the data in the specified folder, and return the
path. Otherwise, returns the preprocessed data and the
corresponding subjects
z_score: bool, default True
            whether or not to transform the data into z-scores, i.e.
            standardize and scale it
adjust_sites: bool, default True
            whether or not to correct for site effects via the ComBat algorithm
residualize_by: dict, see default
variables to residualize the data. Two keys, "continuous" and
"discrete", and the values are a list of the variable names
qc: dict, see default
            keys are the names of the features to control on, values are the
requirements on their values (see the function apply_qc)
Returns
-------
item: namedtuple
a named tuple containing 'train_input_path', 'train_metadata_path',
and 'test_input_path', 'test_metadata_path' if test_size > 0
X_train: numpy array,
Training data, if return_data is True
X_test: numpy array,
Test data, if return_data is True and test_size > 0
subj_train: numpy array,
Training subjects, if return_data is True
subj_test: numpy array,
Test subjects, if return_data is True and test_size > 0
"""
clinical_prefix = "bloc-clinical_score-"
roi_prefix = "bloc-t1w_roi-"
subject_column_name = "participant_id"
path = os.path.join(datasetdir, "rois_X_train.npy")
meta_path = os.path.join(datasetdir, "rois_X_train.tsv")
path_test = None
meta_path_test = None
if test_size > 0:
path_test = os.path.join(datasetdir, "rois_X_test.npy")
meta_path_test = os.path.join(datasetdir, "rois_X_test.tsv")
if not os.path.isfile(path):
data = pd.read_csv(files["stratification"], sep="\t")
roi_mapper = pd.read_csv(files["rois_mapper"], sep="\t")
# ROI selection
roi_label_range = pd.Series([False] * len(roi_mapper))
for roi_type in roi_types:
if roi_type == "cortical":
roi_label_range = roi_label_range | (
(roi_mapper["labels"] > 11000) &
(roi_mapper["labels"] < 13000))
elif roi_type == "subcortical":
roi_label_range = roi_label_range | (
roi_mapper["labels"] > 13000)
elif roi_type == "other":
roi_label_range = roi_label_range | (
roi_mapper["labels"] < 11000)
else:
raise ValueError("Roi types must be either 'cortical', \
'subcortical' or 'other'")
roi_labels = roi_mapper.loc[roi_label_range, "labels"]
# Feature selection
features_list = []
for column in data.columns:
if column.startswith(roi_prefix):
roi = int(column.split(":")[1].split("_")[0])
metric = column.split("-")[-1]
if roi in roi_labels.values and metric in metrics:
features_list.append(column.replace(roi_prefix, ""))
data_train = apply_qc(data, clinical_prefix, qc).sort_values(
subject_column_name)
data_train.columns = [elem.replace(roi_prefix, "")
for elem in data_train.columns]
X_train = data_train[features_list].copy()
# Splits in train and test and removes nans
if test_size > 0:
X_train, X_test, data_train, data_test = train_test_split(
X_train, data_train, test_size=test_size,
random_state=seed)
na_idx_test = (X_test.isna().sum(1) == 0)
X_test = X_test[na_idx_test]
data_test = data_test[na_idx_test]
subj_test = data_test[subject_column_name].values
na_idx_train = (X_train.isna().sum(1) == 0)
X_train = X_train[na_idx_train]
data_train = data_train[na_idx_train]
subj_train = data_train[subject_column_name].values
cols = X_train.columns
# Correction for site effects
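            # ComBat (the fortin_combat model from neurocombat_sklearn) is fitted once per metric: it
            # removes site/batch effects from the ROI features while preserving the discrete and
            # continuous covariates listed in residualize_by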
if adjust_sites:
for metric in metrics:
adjuster = fortin_combat()
features = [feature for feature in features_list
if metric in feature]
X_train[features] = adjuster.fit_transform(
X_train[features],
data_train[["{}{}".format(
clinical_prefix, site_column_name)]],
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]],
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]])
_path = os.path.join(
datasetdir, "rois_combat_{0}.pkl".format(metric))
with open(_path, "wb") as of:
pickle.dump(adjuster, of)
if test_size > 0:
X_test[features] = adjuster.transform(
X_test[features],
data_test[["{}{}".format(
clinical_prefix, site_column_name)]],
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]],
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]])
# Standardizes
if z_score:
scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
_path = os.path.join(datasetdir, "rois_scaler.pkl")
with open(_path, "wb") as f:
pickle.dump(scaler, f)
if test_size > 0:
X_test = scaler.transform(X_test)
else:
X_train = X_train.values
if test_size > 0:
X_test = X_test.values
# Residualizes and scales
            if residualize_by is not None and len(residualize_by) > 0:
regressor = LinearRegression()
y_train = np.concatenate([
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]].values,
OneHotEncoder(sparse=False).fit_transform(
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]])
], axis=1)
regressor.fit(y_train, X_train)
X_train = X_train - regressor.predict(y_train)
_path = os.path.join(datasetdir, "rois_residualizer.pkl")
with open(_path, "wb") as f:
pickle.dump(regressor, f)
if test_size > 0:
y_test = np.concatenate([
data_test[[
"{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]].values,
OneHotEncoder(sparse=False).fit_transform(
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]])
], axis=1)
X_test = X_test - regressor.predict(y_test)
# Return data and subjects
X_train_df = pd.DataFrame(data=X_train, columns=cols)
X_train_df.insert(0, subject_column_name, subj_train)
X_test_df = None
if test_size > 0:
X_test_df = pd.DataFrame(data=X_test, columns=cols)
X_test_df.insert(0, subject_column_name, subj_test)
# Saving
np.save(path, X_train)
X_train_df.to_csv(meta_path, index=False, sep="\t")
if test_size > 0:
np.save(path_test, X_test)
X_test_df.to_csv(meta_path_test, index=False, sep="\t")
if return_data:
X_train = np.load(path)
subj_train = pd.read_csv(meta_path, sep="\t")[
subject_column_name].values
X_test, subj_test = (None, None)
if test_size > 0:
X_test = np.load(path_test)
subj_test = pd.read_csv(meta_path_test, sep="\t")[
subject_column_name].values
return X_train, X_test, subj_train, subj_test
else:
return Item(train_input_path=path, test_input_path=path_test,
train_metadata_path=meta_path,
test_metadata_path=meta_path_test)
return fetch_rois
def fetch_surface_wrapper(hemisphere, datasetdir=SAVING_FOLDER,
files=FILES, cohort=COHORT_NAME,
site_column_name="t1:site",
defaults=DEFAULTS["surface"]):
""" Fetcher wrapper for surface data
Parameters
----------
hemisphere: string
name of the hemisphere data fetcher, one of "rh" or "lh"
datasetdir: string, default SAVING_FOLDER
path to the folder in which to save the data
files: dict, default FILES
contains the paths to the different files
cohort: string, default COHORT_NAME,
name of the cohort
    site_column_name: string, default "t1:site"
name of the column containing the site of MRI acquisition
defaults: dict, default DEFAULTS
default values for the wrapped function
Returns
-------
fetcher: function
corresponding fetcher
"""
assert(hemisphere in ["rh", "lh"])
fetcher_name = "fetcher_surface_{}_{}".format(hemisphere, cohort)
# @Fetchers.register
def fetch_surface(
metrics=defaults["metrics"],
test_size=defaults["test_size"], seed=defaults["seed"],
return_data=defaults["return_data"],
z_score=defaults["z_score"], adjust_sites=defaults["adjust_sites"],
residualize_by=defaults["residualize_by"], qc=defaults["qc"]):
""" Fetches and preprocesses surface data
Parameters
----------
metrics: list of strings, see defaults
metrics to fetch
test_size: float, default 0.2
proportion of the dataset to keep for testing. Preprocessing models
will only be fitted on the training part and applied to the test
set. You can specify not to use a testing set by setting it to 0
seed: int, default 42
random seed to split the data into train / test
return_data: bool, default False
If false, saves the data in the specified folder, and return the
path. Otherwise, returns the preprocessed data and the
corresponding subjects
z_score: bool, default True
            whether or not to transform the data into z-scores, i.e.
            standardize and scale it
adjust_sites: bool, default True
            whether or not to correct for site effects via the ComBat algorithm
residualize_by: dict, see default
variables to residualize the data. Two keys, "continuous" and
"discrete", and the values are a list of the variable names
qc: dict, see default
            keys are the names of the features to control on, values are the
requirements on their values (see the function apply_qc)
Returns
-------
item: namedtuple
a named tuple containing 'train_input_path', 'train_metadata_path',
and 'test_input_path', 'test_metadata_path' if test_size > 0
X_train: numpy array,
Training data, if return_data is True
X_test: numpy array,
Test data, if return_data is True and test_size > 0
subj_train: numpy array,
Training subjects, if return_data is True
subj_test: numpy array,
Test subjects, if return_data is True and test_size > 0
"""
clinical_prefix = "bloc-clinical_score-"
surf_prefix = "bloc-t1w_hemi-{}_metric".format(hemisphere)
        data = pd.read_csv(files["surf_stratification"], sep="\t").drop(
columns=["bloc-t1w_hemi-lh_metric-area",
"bloc-t1w_hemi-rh_metric-area"])
# Feature selection
features_list = []
for metric in metrics:
for column in data.columns:
if column.startswith(surf_prefix):
m = column.split('-')[-1]
if m == metric:
features_list.append(column)
data_train = apply_qc(data, clinical_prefix, qc).sort_values(
"participant_id")
# Loads surface data
n_vertices = len(
surface_loader(data_train[features_list[0]].iloc[0]).get_data())
X_train = np.zeros((len(data_train), n_vertices, len(features_list)))
for i in range(len(data_train)):
for j, feature in enumerate(features_list):
path = data_train[feature].iloc[i]
if not pd.isnull([path]):
X_train[i, :, j] = surface_loader(
path).get_data().squeeze()
# Splits in train and test and removes nans
if test_size > 0:
X_train, X_test, data_train, data_test = train_test_split(
X_train, data_train, test_size=test_size, random_state=seed)
na_idx_test = (np.isnan(X_test).sum((1, 2)) == 0)
X_test = X_test[na_idx_test]
data_test = data_test[na_idx_test]
if return_data:
subj_test = data_test["participant_id"].values
na_idx_train = (np.isnan(X_train).sum((1, 2)) == 0)
X_train = X_train[na_idx_train]
data_train = data_train[na_idx_train]
if return_data:
subj_train = data_train["participant_id"].values
# Applies feature-wise preprocessing
for i, feature in enumerate(features_list):
# Correction for site effects
if adjust_sites:
non_zeros_idx = (X_train[:, :, i] > 0).sum(0) >= 1
adjuster = fortin_combat()
X_train[:, non_zeros_idx, i] = adjuster.fit_transform(
X_train[:, non_zeros_idx, i],
data_train[["{}{}".format(
clinical_prefix, site_column_name)]],
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]],
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]])
path = os.path.join(
datasetdir,
"surface_{}_combat_feature{}.pkl".format(hemisphere, i))
with open(path, "wb") as f:
pickle.dump(adjuster, f)
if test_size > 0:
X_test[:, non_zeros_idx, i] = adjuster.transform(
X_test[:, non_zeros_idx, i],
data_test[["{}{}".format(
clinical_prefix, site_column_name)]],
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]],
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]])
# Standardizes and scales
if z_score:
scaler = RobustScaler()
X_train[:, :, i] = scaler.fit_transform(X_train[:, :, i])
path = os.path.join(
datasetdir,
"surface_{}_scaler_feature{}.pkl".format(hemisphere, i))
with open(path, "wb") as f:
pickle.dump(scaler, f)
if test_size > 0:
X_test[:, :, i] = scaler.transform(X_test[:, :, i])
# Residualizes
if residualize_by is not None and len(residualize_by) > 0:
regressor = LinearRegression()
y_train = np.concatenate([
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]].values,
OneHotEncoder(sparse=False).fit_transform(
data_train[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]])
], axis=1)
regressor.fit(y_train, X_train[:, :, i])
X_train[:, :, i] = X_train[:, :, i] - regressor.predict(
y_train)
path = os.path.join(
datasetdir,
"surface_{}_residualizer_feature{}.pkl".format(
hemisphere, i))
with open(path, "wb") as f:
pickle.dump(regressor, f)
if test_size > 0:
y_test = np.concatenate([
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["continuous"]]
].values,
OneHotEncoder(sparse=False).fit_transform(
data_test[["{}{}".format(clinical_prefix, f)
for f in residualize_by["discrete"]]])
], axis=1)
X_test[:, :, i] = X_test[:, :, i] - regressor.predict(
y_test)
# Returns data and subjects
if return_data:
if test_size > 0:
return X_train, X_test, subj_train, subj_test
return X_train, subj_train
# Saving
path = os.path.join(
datasetdir, "surface_{}_X_train.npy".format(hemisphere))
np.save(path, X_train)
if test_size > 0:
path_test = os.path.join(
datasetdir, "surface_{}_X_test.npy".format(hemisphere))
np.save(path_test, X_test)
return path, path_test
return path
return fetch_surface
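# Usage sketch (hedged; the wrapper's own name and default values are defined above this
# excerpt and are only assumed here):
#   fetch_lh = <surface wrapper>(hemisphere="lh")           # build the fetcher closure
#   X_tr, X_te, subj_tr, subj_te = fetch_lh(return_data=True, test_size=0.2)
#   path_train, path_test = fetch_lh(return_data=False)     # or just get the saved .npy paths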
def fetch_genetic_wrapper(datasetdir=SAVING_FOLDER, files=FILES,
cohort=COHORT_NAME, defaults=DEFAULTS['genetic']):
""" Fetcher wrapper for genetic data
Parameters
----------
datasetdir: string, default SAVING_FOLDER
path to the folder in which to save the data
files: dict, default FILES
contains the paths to the different files
cohort: string, default COHORT_NAME,
name of the cohort
defaults: dict, default DEFAULTS
default values for the wrapped function
Returns
-------
fetcher: function
corresponding fetcher
"""
fetcher_name = "fetcher_genetic_{}".format(cohort)
# @Fetchers.register
def fetch_genetic(
scores=defaults["scores"], test_size=defaults["test_size"],
seed=defaults["seed"], return_data=defaults["return_data"],
z_score=defaults["z_score"], qc=defaults["qc"]):
""" Fetches and preprocesses genetic data
Parameters
----------
scores: list of strings, see defaults
scores to fetch; None means all the available scores are fetched
test_size: float, see defaults
proportion of the dataset to keep for testing. Preprocessing models
will only be fitted on the training part and applied to the test
set. You can specify not to use a testing set by setting it to 0
seed: int, see default
random seed to split the data into train / test
return_data: bool, default False
If False, saves the data in the specified folder and returns the
path. Otherwise, returns the preprocessed data and the
corresponding subjects
z_score: bool, see defaults
whether or not to transform the data into z-scores, i.e.
standardize and scale it
qc: dict, see defaults
keys are the names of the features to control on, values are the
requirements on their values (see the function apply_qc)
Returns
-------
item: namedtuple
a named tuple containing 'train_input_path', 'train_metadata_path',
and 'test_input_path', 'test_metadata_path' if test_size > 0
X_train: numpy array
Training data, if return_data is True
X_test: numpy array
Test data, if return_data is True and test_size > 0
subj_train: numpy array
Training subjects, if return_data is True
subj_test: numpy array
Test subjects, if return_data is True and test_size > 0
"""
clinical_prefix = "bloc-clinical_score-"
genetic_prefix = "bloc-genetic_score-"
subject_column_name = "participant_id"
path = os.path.join(datasetdir, "genetic_X_train.npy")
meta_path = os.path.join(datasetdir, "genetic_X_train.tsv")
path_test = None
meta_path_test = None
if test_size > 0:
path_test = os.path.join(datasetdir, "genetic_X_test.npy")
meta_path_test = os.path.join(datasetdir, "genetic_X_test.tsv")
if not os.path.isfile(path):
data = pd.read_csv(files["stratification"], sep="\t")
# Feature selection
features_list = []
for column in data.columns:
if column.startswith(genetic_prefix):
score = column.split("-")[-1]
if scores is not None and score in scores:
features_list.append(
column.replace(genetic_prefix, ""))
elif scores is None:
features_list.append(
column.replace(genetic_prefix, ""))
data_train = apply_qc(data, clinical_prefix, qc).sort_values(
subject_column_name)
data_train.columns = [elem.replace(genetic_prefix, "")
for elem in data_train.columns]
X_train = data_train[features_list].copy()
# Splits in train and test and removes nans
if test_size > 0:
X_train, X_test, data_train, data_test = train_test_split(
X_train, data_train, test_size=test_size,
random_state=seed)
na_idx_test = (X_test.isna().sum(1) == 0)
X_test = X_test[na_idx_test]
data_test = data_test[na_idx_test]
subj_test = data_test[subject_column_name].values
na_idx_train = (X_train.isna().sum(1) == 0)
X_train = X_train[na_idx_train]
data_train = data_train[na_idx_train]
subj_train = data_train[subject_column_name].values
cols = X_train.columns
# Standardizes and scales
if z_score:
scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
_path = os.path.join(datasetdir, "genetic_scaler.pkl")
with open(_path, "wb") as f:
pickle.dump(scaler, f)
if test_size > 0:
X_test = scaler.transform(X_test)
else:
X_train = X_train.values
if test_size > 0:
X_test = X_test.values
# Return data and subjects
X_train_df = pd.DataFrame(data=X_train, columns=cols)
X_train_df.insert(0, subject_column_name, subj_train)
X_test_df = None
if test_size > 0:
X_test_df = pd.DataFrame(data=X_test, columns=cols)
X_test_df.insert(0, subject_column_name, subj_test)
# Saving
np.save(path, X_train)
X_train_df.to_csv(meta_path, index=False, sep="\t")
if test_size > 0:
np.save(path_test, X_test)
X_test_df.to_csv(meta_path_test, index=False, sep="\t")
if return_data:
X_train = np.load(path)
subj_train = | pd.read_csv(meta_path, sep="\t") | pandas.read_csv |
## main2a* is for plotting weights
# main2a1 is for preparing this data
#
# uses data from:
# reduced_model_results_sbrc/no_opto
# reduced_model_results_sbrc/no_opto_no_licks1
# reduced_model_results_sbrc_subsampling/no_opto
# DATASET/features
import json
import os
import pandas
import numpy as np
import my.decoders
## Parameters
with open('../parameters') as fi:
params = json.load(fi)
## Which models to load
# Include everything needed by main2a2 or main3b
reduced_models = [
'contact_binarized+anti_contact_count+angle',
'contact_binarized+anti_contact_count+angle+anti_angle_max',
]
# Partition features with these names as raw
raw_features_names = [
'contact_binarized',
'contact_count_total',
'contact_count_by_time',
'contact_count_by_whisker',
'contact_count_total',
'contact_interaction',
'contact_interaction_count_by_label',
'contact_surplus',
'task',
'anti_contact_count',
]
## Which datasets to include
# This goes dirname, dataset, model
iterations = [
('reduced_model_results_sbrc', 'no_opto', 'contact_binarized+anti_contact_count+angle',),
('reduced_model_results_sbrc', 'no_opto', 'contact_binarized+anti_contact_count+angle+anti_angle_max',),
('reduced_model_results_sbrc', 'no_opto_no_licks1', 'contact_binarized+anti_contact_count+angle',),
('reduced_model_results_sbrc_subsampling', 'no_opto', 'contact_binarized+anti_contact_count+angle',),
]
## Load data from each dataset * model in turn
weights_part_l = []
icpt_transformed_part_l = []
keys_l = []
for dirname, dataset, model in iterations:
## Identify subsampling or not
if 'subsampling' in dirname:
subsampling = True
else:
subsampling = False
## Load features
# We need these to partition the results from the reduced models
print("loading features")
full_model_features = pandas.read_pickle(os.path.join(
params['logreg_dir'], 'datasets', dataset, 'features'))
print("done")
## Path to model results
# Path to model
model_dir = os.path.join(
params['logreg_dir'], dirname, dataset, model)
## Load
model_res = my.decoders.load_model_results(model_dir)
## Extract the features used in this model from the full set
model_features = full_model_features.loc[:,
model_res['weights'].columns]
# Remove levels
model_features.index = model_features.index.remove_unused_levels()
model_res['weights'].index = model_res[
'weights'].index.remove_unused_levels()
## Partition
# Identify which features are raw
raw_mask = pandas.Series(
model_res['weights'].columns.get_level_values('metric').isin(
raw_features_names),
index=model_res['weights'].columns)
# TODO: remove dependence on model_features here, by partitioning
# just the weights, not the features
part_res = my.decoders.partition(model_features, model_res, raw_mask)
## Add mouse as level
part_res['weights_part'].index = pandas.MultiIndex.from_tuples([
(session.split('_')[1], session, decode_label)
for session, decode_label in part_res['weights_part'].index],
names=['mouse', 'session', 'decode_label'])
part_res['icpt_transformed_part'].index = pandas.MultiIndex.from_tuples([
(session.split('_')[1], session, decode_label)
for session, decode_label in part_res['icpt_transformed_part'].index],
names=['mouse', 'session', 'decode_label'])
## Store
weights_part_l.append(part_res['weights_part'])
icpt_transformed_part_l.append(part_res['icpt_transformed_part'])
keys_l.append((subsampling, dataset, model))
## Concat
big_weights_part = pandas.concat(
weights_part_l, axis=1, keys=keys_l, names=['subsampling', 'dataset', 'model'])
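# Note on the concat above (illustrative, standard pandas semantics): passing
# keys=[(subsampling, dataset, model), ...] together with names=['subsampling',
# 'dataset', 'model'] prepends those three levels to the column MultiIndex, so e.g.
#   big_weights_part[(False, 'no_opto', 'contact_binarized+anti_contact_count+angle')]
# recovers the partitioned weights for that single iteration.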
big_icpt_part = | pandas.concat(
icpt_transformed_part_l, axis=1, keys=keys_l, names=['subsampling', 'dataset', 'model']) | pandas.concat |
from .metrics import accuracy
from .metrics import topk_acc
from .metrics import generalized_distance_matrix
from .metrics import generalized_distance_matrix_torch
from .chemspace import get_drug_batch
from typing import Optional, Sequence, Tuple, Union
import scipy.io as sio
import scipy.stats as st
from scipy import sparse
import numpy as np
import pandas as pd
import anndata as ad
import seaborn as sns
import networkx as nx
import community
import toolz as tz
import tqdm
import os
import collections
from sklearn import metrics
from sklearn.utils import sparsefuncs
from sklearn.neighbors import kneighbors_graph
from sklearn.mixture import GaussianMixture as GMM
from joblib import Parallel, delayed
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, IterableDataset, DataLoader
import torch_geometric
from torch_geometric.data import Batch
from rdkit import Chem
from rdkit.Chem import AllChem, Draw
import matplotlib.pyplot as plt
# TO-DO: Refactor trainers to work with Data objects,
# for different models not to be hard coded only based
# on their number of inputs (e.g. supervised {x,y},
# cond_generator {x,g}, etc...)
class Data:
"""
Abstract data class to wrap data in ML models.
"""
def __init__(
self,
x,
y=None,
g=None):
"""
Params
------
x (np.array | torch.tensor):
Input data to the model
"""
self.x = x
self.y = y
self.g = g
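# Minimal illustration (a sketch, not used elsewhere in this module): wrap a feature
# matrix and labels so downstream trainers can address them uniformly as data.x / data.y.
#   batch = Data(x=torch.randn(32, 978), y=torch.randint(0, 10, (32,)))
#   batch.x.shape, batch.y.shape     # torch.Size([32, 978]), torch.Size([32])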
def train_supervised_gcn(
model:nn.Module,
data:torch_geometric.data.Data,
loss_fn,
optimizer,
multiclass = False,
n_out = 1
)->Tuple[float, float]:
"""
Single fwd-bwd pass on GraphConvNet model.
Returns loss and accuracy.
"""
y_true = torch.tensor(data.y, dtype = torch.long)
optimizer.zero_grad()
y_pred = model(data)
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
loss = loss_fn(
y_pred.float(),
y_true.reshape(-1, n_out).float()
)
acc = accuracy(y_pred, y_true)
loss.backward()
optimizer.step()
return loss, acc
def val_supervised_gcn(
model, data, loss_fn, multiclass = False, n_out = 1
)-> float:
y_pred = model(data)
#y_true = torch.from_numpy(np.array(data.y, dtype=np.int16)) #, device = device)
y_true = torch.tensor(data.y, dtype = torch.long)
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
loss = loss_fn(
y_pred.float(),
y_true.reshape(-1, n_out).float()
)
acc = accuracy(y_pred, y_true)
return loss.mean(), acc
def supervised_trainer_gcn(
n_epochs:int,
train_loader,
val_loader,
model,
criterion,
optimizer,
multiclass= False,
n_classes = 1,
logs_per_epoch = 5,
model_dir:str = None,
model_name:str = None,
early_stopping_tol:float = 0.3,
force_cpu=False
)-> Tuple[list, np.ndarray, np.ndarray]:
"""
Wrapper function to train a GNN, returns train and val loss, and val accuracy.
Currently designed for classification problems.
Params
------
n_epochs (int)
Number of forward-backward passes through all the training data.
train_loader, val_loader
torch_geometric.data.Dataloaders of training and validation set.
The validation set is used for estimating model convergence.
model (nn.Module)
Supervised neural net model.
criterion (torch.nn.modules.loss object)
Loss function.
optimizer (torch.optim object)
Optimizer, e.g. Adam or RMSProp.
multiclass (bool, default = False)
Whether the model is a softmax classification model.
n_classes (int, default = 1)
Dimensionality of output dimension.
model_dir (str, default = None)
Path to store trained models.
If set to None it will not store the model's weights.
model_name (str, default = None)
Filename of the model to be stored. If set to None and `model_dir` is specified,
the model will be stored as `model.pt`.
early_stopping_tol (float, default = 0.1)
Tolerance to stop the training.
It is used as the fractional increase in the validation loss
in order to stop the training. I.e. in pseudocode:
Stop if val_loss[i] > (1+early_stopping_tol)*val_loss[i-1]
The higher the value the more tolerant to run for the number of epochs.
If the value is small the traning loop can be too sensitive to small
increases in the validation loss.
"""
batch_size = train_loader.batch_size
print_every = np.floor(train_loader.dataset.__len__() / batch_size / logs_per_epoch) # minibatches
train_loss_vector = [] # to store training loss
val_loss_vector = np.empty(shape = n_epochs)
val_acc_vector = np.empty(shape = n_epochs)
cuda = False if force_cpu else torch.cuda.is_available()
if cuda and not force_cpu:
device = try_gpu()
torch.cuda.set_device(device)
model = model.to(device)
for epoch in np.arange(n_epochs):
running_loss = 0
# TRAINING LOOP
for ix, data in tqdm.tqdm(enumerate(train_loader)):
#input_tensor = data.view(batch_size, -1).float()
if cuda:
#data.edge_attr = data.edge_attr.cuda()
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
train_loss, train_acc = train_supervised_gcn(
model,
data, # graph and label in data object
criterion,
optimizer,
multiclass=multiclass,
n_out =n_classes
)
running_loss += train_loss.item()
# Print loss
if ix % print_every == print_every -1 :
# Print average loss
print('[%d, %5d] loss: %.3f' %
(epoch + 1, ix+1, running_loss / print_every))
train_loss_vector.append(running_loss / print_every)
# Reinitialize loss
running_loss = 0.0
# VALIDATION LOOP
model.eval()
with torch.no_grad():
validation_loss = []
val_accuracy = []
for i, data in enumerate(tqdm.tqdm(val_loader)):
if cuda:
data.edge_attr = data.edge_attr.cuda()
data.edge_index = data.edge_index.cuda()
data.x = data.x.cuda()
data.y = torch.tensor(data.y, device = device)
data.ptr = data.ptr.cuda()
data.batch = data.batch.cuda()
val_loss, val_acc = val_supervised_gcn(
model, data, criterion, multiclass, n_classes
)
validation_loss.append(val_loss)
val_accuracy.append(val_acc)
mean_val_loss = torch.tensor(validation_loss).mean()
mean_accuracy = torch.tensor(val_accuracy).mean()
val_loss_vector[epoch] = mean_val_loss
val_acc_vector[epoch] = mean_accuracy
print('Val. loss %.3f'% mean_val_loss)
print('Val. acc %.3f'% (mean_accuracy*100))
# EARLY STOPPING LOOP
if epoch > 0:
if val_loss_vector[epoch] > (1+early_stopping_tol)*val_loss_vector[epoch-1]:
print('Finished by early stopping at epoch %d'%(epoch))
return train_loss_vector, val_loss_vector, val_acc_vector
# SAVE MODEL
if model_dir is not None:
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if model_name is not None:
torch.save(
model.state_dict(),
os.path.join(model_dir, model_name + '_' + str(epoch) + '.pt')
)
else:
torch.save(
model.state_dict(),
os.path.join(model_dir, 'model' + '_' + str(epoch) + '.pt')
)
print('Finished training')
return train_loss_vector, val_loss_vector, val_acc_vector
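# Usage sketch for the GCN trainer above (the model and loaders are assumptions, not
# defined in this module):
#   gcn = MyGraphClassifier(n_classes=10)
#   train_hist, val_hist, val_acc = supervised_trainer_gcn(
#       n_epochs=10, train_loader=train_dl, val_loader=val_dl, model=gcn,
#       criterion=nn.NLLLoss(), optimizer=torch.optim.Adam(gcn.parameters(), lr=1e-3),
#       multiclass=True, n_classes=10, model_dir="checkpoints", model_name="gcn")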
def train_supervised(
model,
input_tensor,
y_true,
loss_fn,
optimizer,
multiclass =False,
n_out = 1,
):
"""
Wrapper function to make forward and backward pass with minibatch
using a supervised model (classification or regression).
Params
------
n_out (int, default = 1)
Dimensionality of output dimension. Leave as 1 for multiclass,
i.e. the output is a probability distribution over classes (e.g. MNIST).
"""
# Zero out grads
model.zero_grad()
y_pred = model(input_tensor)
#Note that if it's a multiclass classification (i.e. the output is a
# probability distribution over classes) the loss_fn
# nn.NLLLoss(y_pred, y_true) uses as input y_pred.size = (n_batch, n_classes)
# and y_true.size = (n_batch), that's why it doesn't get reshaped.
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else: # Backprop error
loss = loss_fn(y_pred, y_true.view(-1, n_out).float())
try:
acc = accuracy(y_pred, y_true.view(-1, n_out).float())
except:
acc = None
loss.backward()
# Update weights
optimizer.step()
return loss, acc
def validation_supervised(model, input_tensor, y_true, loss_fn, multiclass =False, n_classes= 1):
"""
Returns average loss for an input batch of data with a supervised model.
If running on multiclass mode, it also returns the accuracy.
"""
y_pred = model(input_tensor.float())
if multiclass:
loss = loss_fn(y_pred, y_true)
y_hat = y_pred.argmax(dim = 1)
acc = accuracy(y_hat, y_true)
else:
loss = loss_fn(y_pred, y_true.view(-1, n_classes).float())
try:
acc = accuracy(y_pred, y_true.view(-1, n_classes).float())
except:
acc = None
return loss.mean().item(), acc
def supervised_trainer(
n_epochs:int,
train_loader:DataLoader,
val_loader:DataLoader,
model:nn.Module,
criterion,
optimizer,
multiclass:bool = False,
n_classes:int = 1,
logs_per_epoch:int = 5,
train_fn:callable = train_supervised,
model_dir:str = None,
model_name:str = None,
early_stopping_tol:float = 0.2,
**kwargs
):
"""
Wrapper function to train a supervised model for n_epochs.
Currently designed for classification and regression.
Returns train loss, validation loss and accuracy.
Params
------
n_epochs (int)
Number of forward-backward passes through all the training data.
train_loader, val_loader
Torch dataloaders of training and validation set. The validation set
is used for estimating model convergence.
model (nn.Module)
Supervised neural net model.
criterion (torch.nn.modules.loss object)
Loss function.
optimizer (torch.optim object)
Optimizer, e.g. Adam or RMSProp.
multiclass (bool, default = False)
Whether the model is a softmax classification model.
n_classes (int, default = 1)
Dimensionality of output dimension. Leave as 1 for multiclass,
i.e. the output is a probability distribution over classes (e.g. MNIST).
model_dir (str, default = None)
Path to store trained models. If set to None it will not store the model weights.
model_name (str, default = None)
Filename of the model to be stored. If set to None and `model_dir` is specified,
the model will be stored as `model.pt`
early_stopping_tol (float, default = 0.1)
Tolerance to stop the training.
It is used as the fractional increase in the validation loss
in order to stop the training. I.e. in pseudocode:
Stop if val_loss[i] > (1+early_stopping_tol)*val_loss[i-1]
The higher the value the more tolerant to run for the number of epochs.
If the value is small the traning loop can be too sensitive to small
increases in the validation loss.
**kwargs
All kwargs go to the train_fn and val_fn functions.
Returns
-------
train_loss_vector(array-like)
List with loss at every minibatch, of size (minibatch*n_epochs).
val_loss_vector(array-like)
Numpy array with validation loss for every epoch.
"""
batch_size = train_loader.batch_size
print_every = np.floor(train_loader.dataset.__len__() / batch_size / logs_per_epoch) # minibatches
train_loss_vector = [] # to store training loss
val_loss_vector = np.empty(shape = n_epochs)
val_acc_vector = np.empty(shape = n_epochs)
cuda = torch.cuda.is_available()
if cuda:
device = try_gpu()
torch.cuda.set_device(device)
model = model.to(device)
for epoch in np.arange(n_epochs):
running_loss = 0
# TRAINING LOOP
for ix, (data, y_true) in enumerate(tqdm.tqdm(train_loader)):
if len(data.shape)<4:
data = data.view(batch_size, -1).float()
if cuda:
data = data.cuda(device = device)
y_true = y_true.cuda(device = device)
train_loss, train_acc = train_fn(
model,
data,
y_true,
criterion,
optimizer,
multiclass=multiclass,
n_out =n_classes,
**kwargs
)
running_loss += train_loss.item()
# Print loss
if ix % print_every == print_every -1 :
# Print average loss
print('[%d, %6d] loss: %.3f' %
(epoch + 1, ix+1, running_loss / print_every))
train_loss_vector.append(running_loss / print_every)
# Reinitialize loss
running_loss = 0.0
# VALIDATION LOOP
with torch.no_grad():
model.eval()
validation_loss = []
validation_accuracy = []
for i, (data, y_true) in enumerate(tqdm.tqdm(val_loader)):
if len(data.shape)<4: # if not images
data = data.view(batch_size, -1).float()
if cuda:
data = data.cuda(device = device)
y_true = y_true.cuda(device = device)
val_loss, val_acc = validation_supervised(
model, data, y_true, criterion, multiclass, n_classes
)
validation_loss.append(val_loss)
validation_accuracy.append(val_acc)
mean_val_loss = torch.tensor(validation_loss).mean().item()
mean_val_acc = torch.tensor(validation_accuracy).mean().item()
val_loss_vector[epoch] = mean_val_loss
val_acc_vector[epoch] = mean_val_acc
print('Val. loss %.3f'% mean_val_loss)
print('Val. accuracy %.3f'% (mean_val_acc*100))
# EARLY STOPPING LOOP
if epoch > 0:
if val_loss_vector[epoch] > (1+early_stopping_tol)*val_loss_vector[epoch-1]:
print('Finished by early stopping at epoch %d'%(epoch))
return train_loss_vector, val_loss_vector
# SAVE MODEL
if model_dir is not None:
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if model_name is not None:
torch.save(
model.state_dict(),
os.path.join(model_dir, model_name + '_' + str(epoch) + '.pt')
)
else:
torch.save(
model.state_dict(),
os.path.join(model_dir, 'model' + '_' + str(epoch) + '.pt')
)
print('Finished training')
return train_loss_vector, val_loss_vector, val_acc_vector
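# Usage sketch for the generic trainer above (regression flavour; model and loader names
# are assumptions):
#   mlp = MyRegressor(in_features=978, out_features=1)
#   train_hist, val_hist, val_acc = supervised_trainer(
#       n_epochs=20, train_loader=train_dl, val_loader=val_dl, model=mlp,
#       criterion=nn.MSELoss(), optimizer=torch.optim.Adam(mlp.parameters(), lr=1e-4),
#       multiclass=False, n_classes=1, early_stopping_tol=0.2)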
def print_loss_in_loop(epoch, idx_batch, running_loss, print_every, message='loss'):
print_msg = '[%d, %5d] ' + message + ' : %.3f'
print(print_msg%\
(epoch + 1, idx_batch+1, running_loss / print_every))
def supervised_model_predict(
model:nn.Module,
data_loader,
criterion,
n_points = None,
n_feats= None,
multiclass=False,
n_outputs =1,
score = True
):
"""
Analog to model.predict_proba() from sklearn. Returns a prediction vector given a torch dataloader
and model. It is designed for working with basic supervised models like binary or multilabel
classification, and regression.
Params
------
model (torch.nn.model)
Trained supervised model.
data_loader
n_points (int)
Number of instances (rows) in the dataset. If not provided, the function will
try to extract it from the dataloader.
n_feats (int)
Input dimensions for the model / number of columns in the dataset. If not provided,
the function will try to extract it from the dataloader.
n_outputs (int, default = 1)
Number of outputs of the model. Defaults to 1 dim output, for regression or
binary classification.
Returns
-------
y_pred (np.array)
Array with raw predictions from a forward pass of the model.
"""
if n_points == None and n_feats == None:
try:
n_points, n_feats = data_loader.dataset.data.shape
except:
print('Need to supply number of datapoints and features in input data.')
batch_size = data_loader.batch_size
cuda = torch.cuda.is_available()
device = try_gpu()
model = model.to(device)
# Initialize predictions array
y_pred = torch.zeros(n_points, n_outputs)
if score:
cum_sum_loss = 0
cum_sum_acc = 0
with torch.no_grad():
for ix, (x, y) in tqdm.tqdm(enumerate(data_loader)):
if cuda:
x= x.cuda()
if cuda and score:
y =y.cuda()
# Reshape input for feeding to model
x = x.view(-1, n_feats)
outputs = model(x.float())
y_pred[ix * batch_size : ix * batch_size + batch_size, :] = outputs
if score:
if multiclass:
if cuda:
mean_loss = criterion(outputs, y).mean().cpu().detach().numpy()
else:
mean_loss = criterion(outputs, y).mean().detach().numpy()
acc = accuracy(y, outputs.argmax(axis = 1))#.item()
else:
if cuda:
mean_loss = criterion(outputs, y.view(-1, n_outputs).float()).mean().cpu().detach().numpy()
else:
mean_loss = criterion(outputs, y.view(-1, n_outputs).float()).mean().detach().numpy()
acc = accuracy(y.view(-1, n_outputs), outputs.argmax(axis = 1))#.mean().item()
cum_sum_loss+= mean_loss
cum_sum_acc +=acc
moving_avg_acc = cum_sum_acc / (ix+1)
moving_avg_loss = cum_sum_loss / (ix + 1)
if score:
print("Mean accuracy: %.2f" %moving_avg_acc)
print("Mean validation loss: %.2f"%moving_avg_loss)
return y_pred.detach().numpy()
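# Usage sketch (hedged): raw scores for a held-out loader, then class predictions.
#   y_scores = supervised_model_predict(mlp, val_loader, nn.NLLLoss(),
#                                       multiclass=True, n_outputs=10)
#   y_hat = y_scores.argmax(axis=1)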
def get_positive_negative_indices_batch(
y_true:torch.Tensor, index_dict:dict, cuda:bool = None
)->Tuple[np.array, np.array, np.array]:
"""
Returns indices for positive and negative anchors,
to use in metric learning using hinge triplet loss,
given labels (y_true) for multiclass classification.
Params
------
y_true(torch.Tensor)
Labels from sample codes.
cuda (bool, default = None)
Whether cuda is available for use.
Returns
-------
positive_anchor_ixs, negative_anchor_ixs, perm_labels
"""
max_index = max(index_dict.keys())
# Get cuda status
if cuda is None:
cuda = torch.cuda.is_available()
# Send labels to cpu
if cuda:
y_true = y_true.cpu()
# Make labels from torch.tensor -> numpy array
labels = y_true.numpy()
# Shuffle labels
perm_labels= np.random.permutation(labels)
# Check if any of shuffled labels didn't change
ix_eq = (labels == perm_labels)
# Get the indices where those unchanged labels reside
ix_to_flip = np.nonzero(ix_eq)[0]
# Enter loop if the permuted labels
# and the original labels coincide in an entry
if len(ix_to_flip) >= 1:
# If any of the labels to flip is the last code
# subtract as adding would result in error
label_flip_max_code = np.any(perm_labels[ix_to_flip] == max_index)
label_flip_min_code = np.any(perm_labels[ix_to_flip] == 0)
# Unlikely case where the batch contains both
# the first and last index
# this will cause the hinge loss to be the margin
if label_flip_max_code and label_flip_min_code:
print('At least one label in the positive and negative are the same')
pass
elif label_flip_max_code and not label_flip_min_code:
perm_labels[ix_to_flip] = perm_labels[ix_to_flip] - 1
# Fall back to add an index
else :
perm_labels[ix_to_flip] = perm_labels[ix_to_flip] + 1
# Check that all labels are different
#assert np.all(labels != perm_labels)
# Get anchor indices for samples
positive_anchor_ixs = [np.random.choice(index_dict[l], size = 1)[0] for l in labels]
negative_anchor_ixs = [np.random.choice(index_dict[l], size = 1)[0] for l in perm_labels]
return positive_anchor_ixs, negative_anchor_ixs, perm_labels
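# Simplified, self-contained sketch of the negative-mining idea above (illustrative only,
# not called anywhere in this module; relies on the module-level numpy import): shuffle the
# label vector, then nudge any entry that collided with its original label to a neighbouring
# class so every "negative" label differs from its anchor.
def _demo_negative_label_permutation(n_samples=8, n_classes=5, seed=0):
    rng = np.random.RandomState(seed)
    labels = rng.randint(0, n_classes, size=n_samples)
    perm = rng.permutation(labels)
    collisions = np.nonzero(labels == perm)[0]
    # move colliding labels up by one class, or down if already at the last class
    perm[collisions] = np.where(perm[collisions] == n_classes - 1,
                                perm[collisions] - 1,
                                perm[collisions] + 1)
    return labels, perm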
class JointEmbeddingTrainer:
"""
Class for training the joint embedding model.
Notes
-----
* Assumes both adata and df_drugs have coinciding names in the column
`drug_name`. Also assumes that adata.obs has a column called `sample_code`,
holding the numerical encoding of each drug name, i.e. that there's a
mapping {'drug_1': 0, ..., 'drug_n': (n-1)}.
* `indices` are needed when using a graph dataloader
(no .data attribute in dataloader object).
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
#index_dict_train:dict,
#index_dict_test:dict,
#name_to_mol:dict,
#ix_to_name:dict,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
indices=None,
force_cpu = False
):
"""
Params
------
adata(ad.AnnData)
Base anndata, contains both train and validation sets.
df_drugs(pd.DataFrame)
Pandas df containing mols in train and val sets.
regressor_loss (torch.nn.loss, default=None)
A torch loss function, e.g. nn.MSELoss
"""
#device = try_gpu()
#self.device = device
self.model = model
self.batch_size = batch_size
self.adata = adata
self.train_loader, self.val_loader = train_loader, val_loader
self.cuda = False if force_cpu else torch.cuda.is_available()
self.device = torch.device('cpu') if force_cpu == True else try_gpu()
if self.cuda:
self.model = self.model.to(self.device)
self.n_epochs = n_epochs
self.hinge_loss = nn.TripletMarginLoss(margin=margin, p=p_norm_metric)
self.criterion = nn.NLLLoss()
self.ordering_labels = torch.arange(batch_size).to(self.device)
self.contrastive = contrastive_learning #bool
self.metric = metric_learning #bool
self.n_train_batches = len(train_loader.dataset) // batch_size
self.n_test_batches = len(val_loader.dataset) // batch_size
self.optimizer = torch.optim.Adam(self.model.parameters(), lr = lr)
if self.contrastive == False and self.metric ==False:
raise AssertionError(
'Either one or both of contrastive learning and metric learning have to be active.'
)
self.model_name, self.model_dir = model_name, model_dir
if indices is not None:
self.index_dict_train = indices["train"]
self.index_dict_test = indices["test"]
else:
# Groupby on train adata
gb_train = train_loader.dataset.data.obs.groupby('sample_code')
index_dict_train = {}
for ix, data in gb_train:
index_dict_train[ix] = data.index.values
gb_test = val_loader.dataset.data.obs.groupby('sample_code')
index_dict_test = {}
for ix, data in gb_test:
index_dict_test[ix] = data.index.values
self.index_dict_train = index_dict_train
self.index_dict_test = index_dict_test
self.name_to_mol = dict(df_drugs[['drug_name', 'mol']].values)
self.ix_to_name = dict(adata.obs[['sample_code', 'drug_name']].values)
self.extra_head = extra_head
#if self.extra_head:
# self.regressor_loss = nn.MSELoss()
def contrastive_learning_loop(self, mol_embedding, cell_embedding):
"""Returns contrastive learning loss and cross-retrieval accuracy for a minibatch."""
# Make batch of molecular graphs
cell_embedding_norm = cell_embedding / cell_embedding.norm(dim= -1, keepdim = True)
mol_embedding_norm = mol_embedding / mol_embedding.norm(dim= -1, keepdim = True)
# Extract learnt scalar
logit_scale = self.model.logit_scale.exp()
# Get cosine similarities
# returns tensor of shape (mols, cells)
logits = logit_scale * mol_embedding_norm @ cell_embedding_norm.t()
# Get classification predictions across axes
y_pred_mols = F.log_softmax(logits, dim = 1)
y_pred_cells = F.log_softmax(logits, dim = 0)
# Calculate accuracies
mol_acc = accuracy(y_pred_mols.argmax(axis =1), self.ordering_labels)
cell_acc = accuracy(y_pred_cells.argmax(axis = 0), self.ordering_labels)
acc = (cell_acc + mol_acc)/ 2
# Compute contrastive learning loss
loss_mols = self.criterion(y_pred_mols, self.ordering_labels)
loss_cells = self.criterion(y_pred_cells, self.ordering_labels)
cl_loss = (loss_mols + loss_cells)/2
return cl_loss, acc
def metric_learning_loop(self, y_true, cell_embedding, mol_embedding):
"""Returns average hinge loss from cells2mols and mols2cells for a minibatch."""
pos_cell_ixs, neg_cell_ixs, perm_y_labels = get_positive_negative_indices_batch(
y_true, self.index_dict_train, cuda = self.cuda
)
# Get positive and negative anchors for cells
positive_anchors_cells = torch.from_numpy(self.adata[pos_cell_ixs].X.A)
negative_anchors_cells = torch.from_numpy(self.adata[neg_cell_ixs].X.A)
if self.cuda:
positive_anchors_cells = positive_anchors_cells.cuda()
negative_anchors_cells = negative_anchors_cells.cuda()
# Get negative anchors for molecules
permuted_molecule_batch = Batch.from_data_list(
get_drug_batch(
torch.from_numpy(perm_y_labels),
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute embeddings
positive_cell_embeddings = self.model.encode_cell(positive_anchors_cells)
negative_cell_embeddings = self.model.encode_cell(negative_anchors_cells)
permuted_molecule_embeddings = self.model.encode_molecule(permuted_molecule_batch)
# Compute metric learning loss
# (anchor, positive, negative)
hinge_cells_anchor = self.hinge_loss(
cell_embedding, mol_embedding, permuted_molecule_embeddings
)
hinge_mols_anchor = self.hinge_loss(
mol_embedding, positive_cell_embeddings, negative_cell_embeddings
)
metric_learning_loss = (hinge_cells_anchor + hinge_mols_anchor)/2
return metric_learning_loss
def mol_regressor_loop(self, mol_embedding, y_regressor, lambda_reg=1):
out = self.model.extra_head(mol_embedding)
reg_loss = self.regressor_loss(out, y_regressor)
return lambda_reg*reg_loss
def train_step(self, input_tensor, y_true):
"A single training step for a minibatch."
self.model.zero_grad()
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(
y_true,
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
if not self.metric:
cl_loss.backward()
self.optimizer.step()
results_dict = {
'train_loss': {
'contrastive_loss': cl_loss.item(),
'metric_learning_loss': None
},
'train_acc': train_acc
}
return results_dict
if self.metric:
metric_learning_loss = self.metric_learning_loop(y_true, cell_embedding, mol_embedding)
if not self.contrastive:
metric_learning_loss.backward()
self.optimizer.step()
results_dict = {
'train_loss': {'contrastive_loss': None, 'metric_learning_loss': metric_learning_loss.item()},
'train_acc': None
}
return results_dict
#if self.contrastive and self.metric:
# both contrastive and metric learning active
loss = cl_loss + metric_learning_loss
loss.backward()
self.optimizer.step()
results_dict = {
"train_loss": {
"contrastive_loss": cl_loss.item(),
"metric_learning_loss": metric_learning_loss.item(),
},
"train_acc": train_acc,
}
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true):
"""
"""
#self.model.eval()
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(
y_true,
self.name_to_mol,
self.ix_to_name,
cuda = self.cuda
)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
if not self.metric:
results_dict = {
'test_loss': {'contrastive_loss': cl_loss.item(),'metric_learning_loss': None},
'test_acc': test_acc
}
return results_dict
if self.metric:
metric_learning_loss = self.metric_learning_loop(y_true, cell_embedding, mol_embedding)
if not self.contrastive:
results_dict = {
'test_loss': {'contrastive_loss': None, 'metric_learning_loss': metric_learning_loss.item()},
'test_acc': None
}
return results_dict
#if self.contrastive and self.metric:
# else: both contrastive and metric learning active
loss = cl_loss + metric_learning_loss
results_dict = {
"test_loss": {
"contrastive_loss": cl_loss.item(),
"metric_learning_loss": metric_learning_loss.item(),
},
"test_acc": test_acc,
}
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns the train and validation loss and accuracy as dataframes.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, (input_tensor, y_true) in tqdm.tqdm(enumerate(self.train_loader)):
# Train step
results_dict_train = self.train_step(input_tensor, y_true)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
#df_train_loss['epoch'] = epoch + 1
#df_train_acc['epoch'] = epoch +1
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d \n'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if mean_cl is not np.nan else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if mean_ml is not np.nan else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if mean_acc is not np.nan else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
for ix, (input_tensor, y_true) in tqdm.tqdm(enumerate(self.val_loader)):
# Val step
results_dict_test = self.val_step(input_tensor, y_true)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
#df_test_loss['epoch'] = epoch + 1
#df_test_acc['epoch'] = epoch + 1
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if mean_cl_ is not np.nan else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if mean_ml_ is not np.nan else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if mean_acc_ is not np.nan else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs #df_train_agg, df_test_agg
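# Self-contained sketch (illustrative, not part of the trainer API; relies on the
# module-level torch / F imports): the CLIP-style symmetric loss computed in
# contrastive_learning_loop, here with random unit-normalised embeddings and a fixed
# temperature in place of the model's learnt logit_scale.
def _demo_symmetric_contrastive_loss(batch_size=4, dim=16, temperature=20.0, seed=0):
    torch.manual_seed(seed)
    mols = torch.randn(batch_size, dim)
    cells = torch.randn(batch_size, dim)
    mols = mols / mols.norm(dim=-1, keepdim=True)
    cells = cells / cells.norm(dim=-1, keepdim=True)
    logits = temperature * mols @ cells.t()        # (mols, cells) cosine similarities
    labels = torch.arange(batch_size)              # matched pairs lie on the diagonal
    loss_mols = F.nll_loss(F.log_softmax(logits, dim=1), labels)       # retrieve cells from mols
    loss_cells = F.nll_loss(F.log_softmax(logits.t(), dim=1), labels)  # retrieve mols from cells
    return (loss_mols + loss_cells) / 2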
class JointEmbeddingTrainerV3(JointEmbeddingTrainer):
"""
Class for training the joint embedding using an extra regressor to combine
information from the molecule embedding and a profile of binding energies.
Designed to work with an MLP cell encoder.
TO-DO: Generalize and integrate all JointEmbeddingTrainer classes into a single
general one.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head = extra_head,
indices = indices,
force_cpu = force_cpu
)
self.lambda_reg = lambda_reg
self.regressor_loss = regressor_loss
def train_step(self, input_tensor, y_true, y_regressor=None):
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# This is the part that changes:
h = torch.cat([mol_embedding, y_regressor], dim = -1)
mol_embedding = self.model.extra_head(h)
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
# We don't take into account regressor loss,
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg=self.lambda_reg
# )
#
# loss+=reg_loss
#
# results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
# else:
# results_dict["train_loss"]["regressor_loss"]=None
# if "train_acc" not in results_dict.keys():
# results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true, y_regressor=None):
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Here's the part that changes
h = torch.cat([mol_embedding, y_regressor], dim = -1)
mol_embedding = self.model.extra_head(h)
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
## No need to log regresor loss
# if self.extra_head:
# reg_loss=self.mol_regressor_loop(
# mol_embedding, y_regressor, lambda_reg =self.lambda_reg
# )
#
# results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
# else:
# results_dict["regressor_loss"]=None
# if "test_acc" not in results_dict.keys():
# results_dict["test_acc"]=None
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns
-------
train_logs (pd.dataframe)
Train loss and accuracy.
validation_logs (pd.dataframe)
Val loss and accuracy.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = pd.DataFrame(), pd.DataFrame()
#df_train_logs, df_test_logs = pd.DataFrame(), pd.DataFrame()
for epoch in np.arange(self.n_epochs):
self.model.train()
# Loop through minibatches from training dataloader
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.train_loader)):
# Train step
results_dict_train = self.train_step(input_tensor, y_true, y_regressor)
df_train_loss = df_train_loss.append(
results_dict_train['train_loss'], ignore_index = True
)
df_train_acc = df_train_acc.append(
{'train_acc': results_dict_train['train_acc']}, ignore_index = True
)
mean_cl = df_train_loss.contrastive_loss.mean()
mean_ml = df_train_loss.metric_learning_loss.mean()
#mean_mse = df_train_loss.regressor_loss.mean()
mean_acc = df_train_acc.train_acc.mean()
print('Epoch %d'%(epoch+1))
print('--------------------')
print('Train contrastive loss: %.3f '%(mean_cl if mean_cl is not np.nan else 0.0))
print('Train metric learning loss: %.3f '%(mean_ml if mean_ml is not np.nan else 0.0))
#print('Train regression loss: %.3f '%(mean_mse if mean_mse is not np.nan else 0.0))
print('Train accuracy: %.3f'%(mean_acc*100 if mean_acc is not np.nan else 0.0))
print('\n')
# Loop through mb from validation dataloader
self.model.eval()
for ix, (input_tensor, y_true, y_regressor) in tqdm.tqdm(enumerate(self.val_loader)):
# Val step
results_dict_test = self.val_step(input_tensor, y_true, y_regressor)
df_test_loss = df_test_loss.append(results_dict_test['test_loss'], ignore_index = True)
df_test_acc = df_test_acc.append(
{'test_acc': results_dict_test['test_acc']}, ignore_index = True
)
mean_cl_ = df_test_loss.contrastive_loss.mean()
mean_ml_ = df_test_loss.metric_learning_loss.mean()
#mean_val_mse = df_test_loss.regressor_loss.mean()
mean_acc_ = df_test_acc.test_acc.mean()
print('Val contrastive loss: %.3f '%(mean_cl_ if mean_cl_ is not np.nan else 0.0))
print('Val metric learning loss: %.3f '%(mean_ml_ if mean_ml_ is not np.nan else 0.0))
#print('Val regression loss: %.3f '%(mean_val_mse if mean_val_mse is not np.nan else 0.0))
print('Validation accuracy: %.3f'%(mean_acc_*100 if mean_acc_ is not np.nan else 0.0))
print('\n')
# SAVE MODEL
if self.model_dir is not None:
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
if self.model_name is not None:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, self.model_name + '_' + str(epoch +1) + '.pt')
)
else:
torch.save(
self.model.state_dict(),
os.path.join(self.model_dir, 'model' + '_' + str(epoch +1) + '.pt')
)
# Summarize results
df_train_logs = pd.concat([df_train_loss, df_train_acc], axis = 1)
df_test_logs = pd.concat([df_test_loss, df_test_acc], axis = 1)
epoch_indicator_train = np.concatenate(
[np.repeat(epoch, self.n_train_batches) for epoch in np.arange(1, self.n_epochs+1)]
)
epoch_indicator_test = np.concatenate(
[np.repeat(epoch, self.n_test_batches) for epoch in np.arange(1, self.n_epochs +1)]
)
df_train_logs['epoch'] = epoch_indicator_train
df_test_logs['epoch'] = epoch_indicator_test
# Set logs as attributes
self.train_logs = df_train_logs
self.test_logs = df_test_logs
df_train_agg = df_train_logs.groupby('epoch').mean().reset_index()
df_test_agg = df_test_logs.groupby('epoch').mean().reset_index()
self.best_model_ix = int(df_test_agg.test_acc.argmax())
return df_train_logs, df_test_logs
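# Sketch of the V3 fusion step used in train_step / val_step above (the layer sizes are
# assumptions, not taken from the model definition): the molecule embedding and the
# per-drug profile passed in as y_regressor are concatenated feature-wise before
# extra_head projects them into the joint space, e.g.
#   fuse = nn.Linear(128 + 20, 64)                                   # plays the role of extra_head
#   joint = fuse(torch.cat([mol_embedding, y_regressor], dim=-1))    # (batch, 64)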
class JointEmbeddingTrainerV2(JointEmbeddingTrainer):
"""
Class for training the joint embedding model using an auxiliary regressor task.
Notes
-----
Assumes both adata and df_drugs have coinciding names in the column
`drug_name`. Also assumes that adata.obs has a column called `sample_code`,
holding the numerical encoding of each drug name, i.e. that there's a
mapping {'drug_1': 0, ..., 'drug_n': (n-1)}.
"""
def __init__(
self,
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr:float = 1e-5,
n_epochs:int = 20,
metric_learning:bool = True,
contrastive_learning:bool = True,
p_norm_metric:int = 2,
margin:float = 3.,
model_name:str = None,
model_dir:str = None,
extra_head= True,
lambda_reg = 1,
regressor_loss=None,
indices=None,
force_cpu=False
):
super().__init__(
model,
adata,
df_drugs,
batch_size,
train_loader,
val_loader,
lr = lr,
n_epochs = n_epochs,
metric_learning = metric_learning,
contrastive_learning = contrastive_learning,
p_norm_metric = p_norm_metric,
margin = margin,
model_name = model_name,
model_dir = model_dir,
extra_head= extra_head,
indices=indices,
force_cpu=force_cpu
)
self.lambda_reg = lambda_reg
self.regressor_loss = regressor_loss
def train_step(self, input_tensor, y_true, y_regressor=None):
loss=0
results_dict={ "train_loss": {} }
self.model.zero_grad()
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, train_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
loss+=cl_loss
results_dict["train_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["train_acc"]=train_acc
else:
results_dict["train_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
loss+=metric_learning_loss
results_dict["train_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["metric_learning_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg=self.lambda_reg
)
loss+=reg_loss
results_dict["train_loss"]["regressor_loss"]=reg_loss.item()
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
else:
results_dict["train_loss"]["regressor_loss"]=None
if "train_acc" not in results_dict.keys():
results_dict["train_acc"]=None
#Backprop and update weights
loss.backward()
self.optimizer.step()
return results_dict
@torch.no_grad()
def val_step(self, input_tensor, y_true, y_regressor=None):
self.model.eval()
results_dict={"test_loss": {}} # init results dictionary
# Place tensors in GPU if possible
if self.cuda:
input_tensor = input_tensor.cuda()
y_true = y_true.cuda()
y_regressor = y_regressor.cuda()
# Make batch of molecular graphs
molecule_batch = Batch.from_data_list(
get_drug_batch(y_true,self.name_to_mol,self.ix_to_name,cuda = self.cuda)
)
# Compute cell and molecule embeddings
cell_embedding = self.model.encode_cell(input_tensor.view(self.batch_size, -1).float())
mol_embedding = self.model.encode_molecule(molecule_batch)
# Run through all modes of the model
if self.contrastive:
cl_loss, test_acc = self.contrastive_learning_loop(mol_embedding, cell_embedding)
results_dict["test_loss"]["contrastive_loss"]=cl_loss.item()
results_dict["test_acc"]=test_acc
else:
results_dict["test_loss"]["constastive_loss"]=None
if self.metric:
metric_learning_loss = self.metric_learning_loop(
y_true, cell_embedding, mol_embedding
)
results_dict["test_loss"]["metric_learning_loss"]=metric_learning_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["test_loss"]["metric_learning_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
if self.extra_head:
reg_loss=self.mol_regressor_loop(
mol_embedding, y_regressor, lambda_reg =self.lambda_reg
)
results_dict["test_loss"]["regressor_loss"]=reg_loss.item()
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
else:
results_dict["regressor_loss"]=None
if "test_acc" not in results_dict.keys():
results_dict["test_acc"]=None
#No backprop
#loss.backward()
#self.optimizer.step()
return results_dict
def train(self)-> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Trains the joint embedding model for n_epochs.
Returns
-------
train_logs (pd.dataframe)
Train loss and accuracy.
validation_logs (pd.dataframe)
Val loss and accuracy.
"""
df_train_loss, df_train_acc = pd.DataFrame(), pd.DataFrame()
df_test_loss, df_test_acc = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from copy import deepcopy as dc
import ipywidgets as widgets
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
"""regular expression search! I forget exactly why this is needed"""
m = re.search(r'\d+$', s)
if(m):
return s[:m.start()]+str(int(m.group())+1)
else:
return s+str(0)
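# Examples (follow directly from the regex above):
#   incrementString("well7")  ->  "well8"
#   incrementString("well")   ->  "well0"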
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
enzymes = {str(a):a for a in enzymelist}
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
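# e.g. the reverse complement of "GGAG" is "CTCC", so rcENDDICT["CTCC"] == "A"; this lets an
# overhang be identified no matter which strand of the fragment end it was read from.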
prevplate = None
selenzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
#["buffer10x",0.4],
#["ATP10mM",0.4],
#["BsaI", 0.2],
#["ligase",0.2],
["NEBbuffer",0.4],
["NEBenzyme",0.2],
["water",1.4],
["dnasln",1],
]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = pd.DataFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = pd.DataFrame(gibassy[1:],columns=gibassy[0])
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
ptypedict = {
"ASSGGA04":"384PP_PLUS_AQ_BP",
"ASSGIB01":"384LDV_PLUS_AQ_BP",
"ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
print("Welcome to Moclo Assembly Helper V1")
print("===================================")
def pickEnzyme():
"""asks the user about what kind of enzyme s/he wants to use"""
print("Which enzyme would you like to use?")
for el in range(len(enlist)):
print("[{}] {}".format(el,enlist[el]))
print()
userpick = int(input("type the number of your favorite! "))
selenzyme = enlist[userpick].lower()
print("===================================")
return selenzyme
def findExpts(path):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = []
#print(dirlist)
#for folder in dirlist[1:]:
folder = ['.']
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if("promoter" in fline):
expts+=[(os.path.join(folder[0],fle),fle[:-4])]
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(folder[0],fle),None)
dfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
if(dfs["Sheet1"].columns[0] == "promoter"):
expts+=[(os.path.join(folder[0],fle),fle[:-5])]
except (IOError,KeyError) as e:
pass
return sorted(expts)[::-1]
def findPartsLists(path):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print dirlist
expts = []
for fle in dirlist[0][2]:
#print fle
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(path,fle),None)
dfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
if("parts" in list(dfs.keys())[0]):
expts+=[(os.path.join(path,fle),fle[:-4])]
except IOError:
pass
return sorted(expts)[::-1]
def pickPartsList():
"""user interface for picking a list of parts to use. This list must
contain the concentration of each part as well as the 384 well location
of each part at minimum, but better to have more stuff. Check my example
file."""
print("Searching for compatible parts lists...")
pllist = findPartsLists(os.path.join(".","partslist"))
pickedlist = ''
if(len(pllist) <=0):
print("could not find any parts lists :(. Make sure they are in a \
separate folder called 'partslist' in the same directory as this script")
else:
print("OK! I found")
print()
for el in range(len(pllist)):
print("[{}] {}".format(el,pllist[el][1]))
print()
if(len(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = pd.read_excel(pickedlist,None)
print("===================================")
return openlist
def pickAssembly():
"""user interface for defining assemblies to build"""
#manual = raw_input("would you like to manually enter the parts to assemble? (y/n)")
manual = "n"
if(manual == "n"):
print("searching for compatible input files...")
time.sleep(1)
pllist = findExpts(".")
#print pllist
pickedlist = ''
if(len(pllist) <=0):
print("could not find any assembly files")
else:
print("OK! I found")
print()
for el in range(len(pllist)):
print("[{}] {}".format(el,pllist[el][1]))
print()
if(len(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = pd.read_csv(pickedlist)
print("===================================")
return openlist,pickedlist
else:
print("sorry I haven't implemented this yet")
pickAssembly()
return pd.read_csv(aslist),aslist
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
dpname = "Destination[1]",platebc="",partid="",partname=""):
#if(platebc!=""):
# sptype = ptypedict[platebc]
return "{},{},{},{},{},{},,,{},{},{}\n".format(spname,platebc,sptype,swell,\
partid,partname,dpname,dwell,tvol)
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
"""calculates how much of a single part to put in for a number of fm."""
try:
pwell = partDF[partDF.part==partname].well.iloc[0]
except IndexError:
raise ValueError("Couldn't find the right part named '"+\
partname+"'! Are you sure you're using the right parts list?")
return None, None, None
pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
pconc = partDF[partDF.part==partname]["conc (nM)"]
#concentration of said part, in the source plate
if(len(pconc)<=0):
#in this case we could not find the part!
raise ValueError("Part "+part+" had an invalid concentration!"+\
" Are you sure you're using the right parts list?")
pconc = pconc.iloc[0]
pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
platet = partDF[partDF.part==partname]["platetype"].iloc[0]
e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
partname=partname,printstuff=printstuff)
return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
partname="",sptype=None,printstuff=True):
"""does the calculation to convert femtomoles to volumes, and returns
the finished echo line"""
pvol = (partFm/partConc)*1000
evol = int(pvol)
if(evol <= 25):#im not sure what happens when the echo would round to 0.
#better safe than sorry and put in one droplet.
evol = 25
if(sourceplate==None):
if(printstuff):
print("===> transfer from {} to {}, {} nl".format(sourcewell,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,partname=partname)
else:
if(printstuff):
print("===> transfer from {}, plate {} to {}, {} nl".format(sourcewell,sourceplate,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,spname =sourceplate,\
sptype= sptype,platebc = sourceplate,partname=partname)
return echostring, evol
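#worked example (sketch, made-up wells): transferring 6 fmol from a 30 nM source
#well means (6/30)*1000 = 200 nl; anything that works out below 25 nl is bumped
#up to a single 25 nl droplet instead.
_demo_line,_demo_vol = echoPipet(6.0,30.0,"A1","B1",printstuff=False)
assert _demo_vol == 200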
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
"""looks up the part named "part" in the column specified as col, and
converts it into a pydna object.
this program will check if an input sequence is a valid part.
This involves checking a couple of things:
1) are there only two restriction cut sites?
2) does it have the proper overhangs?
3) after being cut, does it produce one part with bsai sites and one part without?
"""
pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
pcirc = partslist[partslist[col] == part].circular.iloc[0]
p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
povhg = int(p5pover)
pseqRC = str(Dseq(pseq).rc()).lower()
if(p5pover > 0):
pseq = pseq[p5pover:]
elif(p5pover<0):
pseqRC = pseqRC[:p5pover]
if(p3pover <0):
pseq = pseq[:p3pover]
elif(p3pover >0):
pseqRC = pseqRC[p5pover:]
pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
#this defines a dsdna linear sequence
if(pcirc):
#this makes the sequence circular, if we have to
pDseq = pDseq.looped()
if(enzyme != None):
numzymes = len(enzyme.search(pDseq,linear=not pcirc))##\
#len(enzyme.search(pDseq.rc(),linear=pcirc))
if(numzymes < 2 and pcirc):
warnings.warn("Be careful! sequence {} has only {} {} site"\
.format(part,numzymes,str(enzyme)))
elif(numzymes>=2):
try:
testcut = pDseq.cut(enzyme)
except IndexError:
raise IndexError("something's wrong with part "+part)
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
if(numzymes > 2):
warnings.warn("{} has {} extra {} site{}!!"\
.format(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
insert = []
backbone = []
for a in testcut:
fpend = a.five_prime_end()
tpend = a.three_prime_end()
if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
#in this case the fragment we are looking at is the 'backbone'
backbone+=[a]
else:
#we didn't find any site sequences. this must be the insert!
insert+=[a]
if((not fpend[0]=='blunt') and \
(not ((fpend[1].upper() in ENDDICT) or \
(fpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.format(part,fpend[1].upper()))
if((not tpend[0]=='blunt') and \
(not ((tpend[1].upper() in ENDDICT) or \
(tpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.format(part,tpend[1].upper()))
if(len(insert)==0):
raise ValueError("{} does not produce any fragments with no cut site!".format(part))
if(len(insert)>1):
warnings.warn("{} produces {} fragments with no cut site".format(part,len(insert)))
if(len(backbone)>1):
dontwarn = False
if(not pcirc and len(backbone)==2):
#in this case we started with a linear thing and so we expect it
#to make two 'backbones'
dontwarn = True
if(not dontwarn):
warnings.warn("{} produces {} fragments with cut sites".format(part,len(backbone)))
return pDseq
def bluntLeft(DSseq):
"""returns true if the left hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.five_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def bluntRight(DSseq):
"""returns true if the right hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.three_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
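#e.g. a plain linear Dseq("aaaa") is blunt on both sides, while anything circular
#always comes back False here because of the trailing "& DSseq.linear" check.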
def isNewDseq(newpart,partlist):
"""checks to see if newpart is contained within partlist, returns true
if it isn't"""
new = True
if(type(newpart)==Dseqrecord):
newdseqpart = newpart.seq
#seqnewpart = str(newpart).upper()
newcirc = newpart.circular
#dsequid = (newpart.seq).seguid()
#print("dsequid is "+str(dsequid))
#dsnewpart = Dseqrecord(newpart)
#rcnewpart = newpart.rc()
newseguid = newdseqpart.seguid()
#print("newseguid is "+str(newseguid))
cseguid = None
if(newcirc and type(newpart)==Dseqrecord):
cseguid = newpart.cseguid()
for part in partlist:
        if(type(part) == Dseqrecord):
dseqpart = part.seq
partseguid = dseqpart.seguid()
if(newseguid==partseguid):
new=False
break
#if(len(part) != len(newpart)):
#continue
#dspart = Dseqrecord(part)
if(newcirc and part.circular):
if(type(part) == Dseqrecord and cseguid != None):
comparid = part.cseguid()
if(comparid == cseguid):
new=False
break
#if(seqnewpart in (str(part.seq).upper()*3)):
# new=False
# break
#elif(seqnewpart in (str(part.seq.rc()).upper()*3)):
# new=False
# break
#elif(part == newpart or part == rcnewpart):
#new=False
#break
return new
def allCombDseq(partslist,resultlist = []):
'''recursively finds all possible paths through the partslist'''
if(len(partslist)==1):
#if there's only one part, then "all possible paths" is only one
return partslist
else:
#result is the final output
result = []
for p in range(len(partslist)):
newplist = dc(partslist)
#basically the idea is to take the first part,
#and stick it to the front of every other possible assembly
part = newplist.pop(p)
#this is the recursive part
prevresult = allCombDseq(newplist)
partstoadd = []
freezult = dc(result)
#for z in prevresult:
for b in prevresult:
#maybe some of the other assemblies
#we came up with in the recursive step
#are the same as assemblies we will come up
#with in this step. For that reason we may
#want to cull them by not adding them
#to the "parts to add" list
if(isNewDseq(b,freezult)):
partstoadd+=[b]
#try to join the given part to everything else
if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
#this means we don't allow blunt ligations! We also don't allow
#ligations between a linear and a circular part. Makes sense right?
#since that would never work anyway
newpart = None
try:
#maybe we should try flipping one of these?
newpart= part+b
except TypeError:
#this happens if the parts don't have the right sticky ends.
#we can also try rotating 'part' around
pass
try:
#part b is not blunt on the left so this is OK,
#since blunt and not-blunt won't ligate
newpart = part.rc()+b
except TypeError:
pass
if(newpart == None):
#if the part is still None then it won't ligate forwards
#or backwards. Skip!
continue
try:
if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
#given that the part assembled, can it be circularized?
newpart = newpart.looped()
#this thing will return TypeError if it can't be
#looped
except TypeError:
#this happens if the part can't be circularized
pass
if(isNewDseq(newpart,result)):
#this checks if the sequence we just made
#already exists. this can happen for example if we
#make the same circular assembly but starting from
#a different spot around the circle
result+=[newpart]
result+=partstoadd
return result
def pushDict(Dic,key,value):
"""adds a value to a dictionary, whether it has a key or not"""
try:
pval = Dic[key]
except KeyError:
if(type(value)==list or type(value)==tuple):
value = tuple(value)
pval = ()
elif(type(value)==str):
pval = ""
elif(type(value)==int):
pval = 0
elif(type(value)==float):
pval = 0.0
Dic[key] =pval + value
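#small usage sketch: pushDict accumulates into a key whether or not it exists yet,
#starting from an empty value of whatever type is being pushed (int/float/str/tuple).
_demo_mix = {}
pushDict(_demo_mix,"water",1.5)
pushDict(_demo_mix,"water",0.5)
assert _demo_mix["water"] == 2.0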
def findFilesDict(path=".",teststr = "promoter"):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = {}
#print(dirlist)
#for folder in dirlist[1:]:
folder = [path]
#print(dirlist)
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
#print('{}\\{}'.format(folder[0],fle))
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if(teststr in fline):
expts[fle[:-4]]=os.path.join(folder[0],fle)
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(folder[0],fle))
#dfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
#print(xl_file.columns)
if(teststr in xl_file.columns):
#print("found")
expts[fle[:-5]]=os.path.join(folder[0],fle)
except (IOError,KeyError) as e:
pass
return expts
def findPartsListsDict(path,teststr = "parts_1"):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print(dirlist[0][2])
expts = {}
for fle in dirlist[0][2]:
#print fle
if((fle[-4:]=='xlsx') or (fle[-4:]=='xlsm')):
try:
dfs = pd.read_excel(os.path.join(path,fle),None)
#dfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(dfs)
#print(dfs.keys())
if(teststr in list(dfs.keys())[0]):
expts[fle[:-5]] = os.path.join(path,fle)
except IOError:
pass
return expts
def findDNAPaths(startNode,nodeDict,edgeDict):
"""given a start, a dictionary of nodes, and a dictionary of edges,
find all complete paths for a DNA molecule
Complete is defined as: producing a molecule with all blunt edges,
or producing a circular molecule."""
#we assemble the DNA sequences from left to right.
nnode = dc(nodeDict)
noderight = nnode[startNode][1] #the right-hand overhang of the node in question.
del nnode[startNode]
destinations = edgeDict[noderight] #this could contain only one entry, the starting node
seqs = [] #haven't found any complete paths yet
nopaths = True
candidateSeqs = []
if(noderight != "blunt"): #blunt cannot go on
for destination in destinations:
#go through the list of destinations and see if we can go forward
if(destination[1]==0): #this node links to something else
if(destination[0] in nnode): #we havent visited it yet
nopaths = False
newpaths = findDNAPaths(destination[0],nnode,edgeDict) #find all paths from there!
for path in newpaths:
candidateSeqs+=[[startNode]+path]
if(nopaths): #if we dont find any paths, call it good
candidateSeqs+=[[startNode]]
#print("canseqs is {}".format(candidateSeqs))
return candidateSeqs
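#toy illustration (hypothetical two-part graph, not from a real parts list):
#part 0 has ends (blunt, "aatg") and part 1 has ends ("aatg", blunt), so the only
#complete path starting from part 0 is [0, 1].
_toy_nodes = {0:("blunt","aatg"), 1:("aatg","blunt")}
_toy_edges = {"blunt":[[0,0],[1,1]], "aatg":[[0,1],[1,0]]}
assert findDNAPaths(0,_toy_nodes,_toy_edges) == [[0, 1]]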
def getOverhang(Dnaseq,side="left"):
    """extracts the overhang in the DNA sequence, either on the left or right sides.
    If the dna sequence is blunt, then the returned overhang is called 'blunt'"""
    #body reconstructed to mirror the overhang handling in appendPart below
    ovtype,ovseq = Dnaseq.five_prime_end() if(side=="left") else Dnaseq.three_prime_end()
    if(ovtype == "blunt"):
        return "blunt"
    if(("3'" in ovtype) == (side=="left")):
        #a 3' overhang on the left (or a 5' overhang on the right) is reported
        #as the top-strand sequence, i.e. its reverse complement
        return str(Dseq(ovseq).rc()).lower()
    return str(ovseq).lower()
def appendPart(part,pind,edgeDict,nodeDict):
"""this function appends a part to a dictionary of
edges (overhangs), and nodes(middle sequence) for running DPallcombDseq.
part is a DseqRecord of a DNA part that's been cut by an enzyme.
pind is the index of that part in the parts list
edgedict is a dictionary of edges that says which nodes they are connected
to.
nodedict is a dictionary of nodes that says which edges they have."""
Lend = ""
Rend = ""
Ltype,Lseq = part.five_prime_end()
Rtype,Rseq = part.three_prime_end()
if(Ltype == "blunt"):
Lend = "blunt"
#if the end is blunt append nothing
edgeDict[Lend].append([pind,0])
#pushDict(edgeDict,Lend,((pind,0),))
else:
if(Ltype == "3'"):
#if we have a 3' overhang, then add that sequence
Lend = str(Dseq(Lseq).rc()).lower()
else:
#otherwise, it must be a 5' overhang since we handled the
#blunt condition above.
Lend = str(Lseq).lower()
edgeDict[Lend].append([pind,0])
if(Rtype == "blunt"):
#same thing for the right side
Rend = "blunt"
edgeDict[Rend].append([pind,1])
else:
if(Rtype == "5'"):
Rend = str(Dseq(Rseq).rc()).lower()
else:
Rend = str(Rseq).lower()
edgeDict[Rend].append([pind,1])
nodeDict[pind] = (Lend,Rend)
def annotateScar(part, end='3prime'):
plen = len(part)
if(end=='3prime'):
ovhg = part.seq.three_prime_end()
loc1 = plen-len(ovhg[1])
loc2 = plen
else:
ovhg = part.seq.five_prime_end()
loc1 = 0
loc2 = len(ovhg[1])
oseq = str(ovhg[1]).upper()
scarname = "?"
floc = int(loc1)
sloc = int(loc2)
dir = 1
#scardir = "fwd"
if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
#either direction for now...
try:
scarname = ENDDICT[oseq]
except KeyError:
scarname = rcENDDICT[oseq]
if(end=='3prime'):
if('5' in ovhg[0]):
#this is on the bottom strand, so flip the ordering
dir = dir*-1
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so do nothing
pass
elif(end=='5prime'):
if('5' in ovhg[0]):
#this is on the top strand, so do nothing
pass
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so flip the ordering
dir = dir*-1
if(oseq in rcENDDICT.keys()):
#so if we found the reverse complement in fact, then reverse everything
#again
dir = dir*-1
if(dir==-1):
floc = int(loc2)
sloc = int(loc1)
#oseq = str(Dseq(oseq).rc())
part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPallCombDseq(partslist):
    '''Finds all paths through the partslist using a graph type of approach.
First a graph is constructed from all possible overhang interactions,
then the program makes paths from every part to a logical conclusion
in the graph, then it backtracks and actually assembles the DNA.'''
#actually, we need to produce a graph which describes the parts FIRST
#then, starting from any part, traverse the graph in every possible path and store
#the paths which are "valid" i.e., produce blunt ended or circular products.
edgeDict = defaultdict(lambda : []) #dictionary of all edges in the partslist!
nodeDict = {}#defaultdict(lambda : [])
partDict = {}#defaultdict(lambda : [])
pind = 0
import time
rcpartslist = []
number_of_parts = len(partslist)
for part in partslist:
#this next part appends the part to the list of nodes and edges
appendPart(part,pind,edgeDict,nodeDict)
appendPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
rcpartslist+=[part.rc()]
pind+=1
partslist+=rcpartslist
paths = []
for pind in list(nodeDict.keys()):
#find good paths through the graph starting from every part
paths += findDNAPaths(pind,nodeDict,edgeDict)
goodpaths = []
part1time = 0
part2time = 0
for path in paths:
#here we are looking at the first and last parts
#to see if they are blunt
fpart = path[0]
rpart = path[-1]
npart = False
accpart = Dseqrecord(partslist[fpart])
if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
#this means we have a blunt ended path! good
npart = True
plen = len(accpart)
#accpart.add_feature(0,3,label="?",type="scar")
#accpart.add_feature(plen-4,plen,label="?",type="scar")
for pind in path[1:]:
#this traces back the path
#we want to add features as we go representing the cloning
#scars. These scars could be gibson or golden gate in nature
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
#this is checking if the overhangs on the ends are compatible.
#if true, then create a circular piece of DNA!
npart = True
#this means we have a circular part! also good!
#accpart = partslist[fpart]
for pind in path[1:]:
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart=accpart.looped()
if(npart):
#this checks if the part we think is good already exists
#in the list
if(isNewDseq(accpart,goodpaths)):
goodpaths+=[accpart]
#part2time+=time.time()-stime
#dtime = time.time()-stime
#stime = time.time()
#print("done tracing back paths, took "+str(dtime))
#print("first half took " + str(part1time))
#print("second half took " + str(part2time))
return goodpaths
def chewback(seqtochew,chewamt,end="fiveprime"):
"""chews back the amount mentioned, from the end mentioned."""
wat = seqtochew.watson
cri = seqtochew.crick
if(len(seqtochew) > chewamt*2+1):
if(end=="fiveprime"):
cwat = wat[chewamt:]
ccri = cri[chewamt:]
else:
cwat = wat[:-chewamt]
ccri = cri[:-chewamt]
newseq = Dseq(cwat,ccri,ovhg = chewamt)
return newseq
else:
return None
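#sketch of what chewback is for: with end="fiveprime" it trims chewamt bases off
#the 5' end of both strands (e.g. chewback(Dseq("atgcatgcatgcatg"),3)), leaving
#single-stranded 3' tails that act as the Gibson-style overlaps; anything shorter
#than 2*chewamt+1 bases returns None instead.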
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
output = "output.csv",selenzyme=selenzyme,fname="recentassembly",\
protocolsDF=None,sepfiles=True,sepfilename="outputLDV.csv",\
printstuff=True,progbar=None,mypath=".",annotateDF=None):
"""makes an echo csv using the given list of assemblies and source plate of
    parts.
inputs:
parts: dataframe of what's in the source plate
aslist: dataframe of what we need to assemble
gga: a short dictionary indicating what volume of all the components
go into the reaction mix
partsFm: how many femtomoles of each part to use
        source: the name of the source plate, like "384PP_AQ_BP" or something
output: the name of the output file
selenzyme: the enzyme we are going to use for assembly. everything
is assembled with the same enzyme! actually this does nothing because
the enzyme is taken from the aslist thing anyway
fname: this is the name of the folder to save the successfully assembled
dna files into
protocolsDF: a dataframe containing a descriptor for different possible
protocols. For instance it would say how much DNA volume and
concentration we need for GGA or gibson."""
#this is the boilerplate columns list
dnaPath = os.path.join(mypath,"DNA")
outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f1init = len(outfile)
outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f2init = len(outfile2)
#this iterates through rows in the assembly list file. Each row
#defines an assembly, with the columns representing what parts go in.
#this may not be ideal but it's fairly human readable and we only do
#four parts + vector for each assembly.
_,fname = os.path.split(fname)
if("." in fname):
fname = fname[:fname.index(".")]
#the following is for making a spreadsheet style sequence list for
#performing further assemblies
prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,length\n"
prevplate = None
prevtype = None
maxprog = float(len(aslist))
for assnum in range(len(aslist)):
#this goes row by row
if(progbar != None):
progbar.value=float(assnum+1)/maxprog
assembly = aslist[assnum:assnum+1] #cuts out one row of dataframe
dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
#print("pick enzyme")
#print(assembly)
enzyme=None
#if we are doing Gibson assembly, then the restriction enzyme is undefined
try:
selenzyme = assembly.enzyme[assembly.enzyme.index[0]]
#if the user forgot to define an enzyme assume it is BsaI. That's the most common one we use
except KeyError:
selenzyme = "BsaI"
if(protocolsDF!=None):
cprt_temp = "gga"
if(selenzyme == "gibson"):
cprt_temp = "gibson"
#iloc[0] is used in case there are multiple parts with the same
#name. Only the first one is used in that case.
curprot = {"dnasln": protocolsDF[(protocolsDF.protocol==cprt_temp)&\
(protocolsDF.component == "dnasln")].amount.iloc[0]}
partsFm = curprot[curprot.component==partfm].amount.iloc[0]
vectorFm = curprot[curprot.component==vectorfm].amount.iloc[0]
else:
curprot = ggaPD
partsFm = ggaFm
vectorFm = ggavecGm
if(selenzyme == "gibson"):
#for gibson assembly the protocol is different
curprot = gibassyPD
partsFm = gibFm
vectorFm = gibvecFm
water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
if(printstuff):
print("assembling with "+selenzyme)
aind = assembly.index[0] #necessary for dataframes probably because I'm dumb
frags = []
if(not selenzyme == "gibson"):
enzyme = enzymes[selenzyme]
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
for col in assembly:
if(col=="targwell"):#since every row is terminated by the "target well",
#we'll take this opportunity to put in the water
if(int(water) <25):
#echo gets mad if you tell it to pipet significantly less than 25 nl
water = 25
ewat = int(water) #the echo automatically rounds to the nearest 25,
#so it's not really necessary to round here.
#dsrfrags = [Dseqrecord(a) for a in frags]
#x = pydAssembly(dsrfrags,limit = 4)
#print(frags)
#print(len(frags))
allprod= []
nefrags = []
cutfrags = []
if(selenzyme != "gibson"):
enzyme = enzymes[selenzyme]
for frag in frags:
if(selenzyme == "gibson"):
if(len(frag)>chewnt*2+1):
nefrags += [chewback(frag,chewnt)]
else:
raise ValueError("part with sequence "+frag+" is too "+\
"short for gibson! (<= 80 nt)")
else:
newpcs = frag.cut(enzyme)
if(len(newpcs) == 0):
newpcs+=[frag]
for pcs in newpcs:
if(pcs.find(esite)+pcs.find(esiterc)==-2):
nefrags+=[pcs]
allprod = DPallCombDseq(nefrags)
if(printstuff):
print("found {} possible products".format(len(allprod)))
goodprod = []
newpath = os.path.join(dnaPath,fname)
if(printstuff):
print("saving in folder {}".format(newpath))
Cname = ""
try:
#this part gathers the "name" column to create the output sequence
Cname = assembly.name[assembly.name.index[0]]
except KeyError:
Cname = ""
if(Cname == "" or str(Cname) == "nan"):
Cname = "well"+dwell
if(printstuff):
print("Parts in construct {}".format(Cname))
if not os.path.exists(newpath):
if(printstuff):
print("made dirs!")
os.makedirs(newpath)
num = 0
for prod in allprod:
Cnamenum = Cname
#filename = Cname+".gbk"
if(len(allprod) > 1):
#filename = Cname+"_"+str(num)+".gbk"
#wout = open(os.path.join(newpath,filename),"w")
Cnamenum = Cname+"_"+str(num)
else:
pass
#wout = open(os.path.join(newpath,filename),"w")
if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
num+=1
goodprod+=[prod]
#topo = ["linear","circular"][int(prod.circular)]
booltopo = ["FALSE","TRUE"][int(prod.circular)]
#wout.write("\r\n>Construct"+str(num)+"_"+topo)
un_prod = "_".join(Cnamenum.split())
#wout.write("LOCUS {} {} bp ds-DNA {} SYN 01-JAN-0001\n".format(un_prod,len(prod),topo))
#wout.write("ORIGIN\n")
#wout.write(str(prod)+"\n//")
now = datetime.datetime.now()
nowdate = "{}/{}/{}".format(now.month,now.day,now.year)
prod.name = Cnamenum
plt.figure(figsize=(8,1))
ax = plt.gca()
drawConstruct(ax,prod,annotateDF=annotateDF)
plt.show()
prod.write(os.path.join(newpath,Cnamenum+".gbk"))
prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".format(\
dwell,un_prod, selenzyme,nowdate,prod.seq,booltopo,0,0,len(prod))
#wout.close()
assembend = ["y","ies"][int(len(goodprod)>1)]
if(printstuff):
print("Detected {} possible assembl{}".format(len(goodprod),assembend))
frags = []
if(water <=0):
print("WARNING!!!! water <=0 in well {}".format(dwell))
else:
#print("water from {} to {}, {} nl".format(waterwell,dwell,ewat))
if(prevplate == None):
#print("normalwater")
#im not convinced this ever gets triggered
#but just in case, i guess we can find the first water well
waterrows=parts[parts.part=="water"]
if(len(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
platetype= waterrow.platetype
curplatebc = waterrow.platebc
outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
sptype=platetype,platebc = curplatebc,partname="water")
else:
#print("platewater")
#print(prevplate)
waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
if(len(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
sptype=prevtype,platebc = prevplate,partname="water")
if("LDV" in prevtype):
outfile2+=watline
else:
outfile += watline
#add water to the well!
if(printstuff):
print("")
elif(col in ["comment","enzyme","name"]):#skip this column!
pass
else:
#this is the part name from the "assembly" file
part = assembly[col][aind]
if(str(part) == 'nan'):
#this means we skip this part, because the name is empty
if(printstuff):
print("skip one!")
else:
#shouldnt need to define "part" again??
#part = assembly[col][aind]
#this is the name of the part!
#parts[parts.part==assembly[col][aind]].well.iloc[0]
evol = 0
if(':' in str(part)):
#this means we have multiple parts to mix!
subparts = part.split(':')
t_partsFm = partsFm/len(subparts)
t_vecFm = vectorFm/len(subparts)
for subpart in subparts:
useFm = t_partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = t_vecFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
else:
useFm = partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = vectorFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
water=water-evol
pspread = open(os.path.join(newpath,fname+".csv"),"w")
pspread.write(prodSeqSpread)
pspread.close()
seqdispDF = pd.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","length"])
display(seqdispDF)
display(FileLink(os.path.join(newpath,fname+".csv")))
if(len(outfile)>f1init):
ofle = open(output,"w")
ofle.write(outfile)
ofle.close()
display(FileLink(output))
if(sepfiles and (len(outfile2) > f2init)):
if(printstuff):
print("wrote LDV steps in {}".format(sepfilename))
ofle2 = open(sepfilename,"w")
ofle2.write(outfile2)
ofle2.close()
display(FileLink(sepfilename))
outitems = []
class assemblyFileMaker():
def __init__(self,mypath=".",partsdf = None):
self.p = partsdf
self.holdup=False
self.ddlay = widgets.Layout(width='75px',height='30px')
self.eblay = widgets.Layout(width='50px',height='30px')
self.lsblay = widgets.Layout(width='140px',height='30px')
self.sblay = widgets.Layout(width='100px',height='30px')
self.rsblay = widgets.Layout(width='60px',height='30px')
self.Vboxlay = widgets.Layout(width='130px',height='67px')
self.textlay = widgets.Layout(width='200px',height='30px')
self.PlateLetters="ABCDEFGHIJKLMNOP"
self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
13,14,15,16,17,18,19,20,21,22,23,24)
self.PlateRowsCols=(16,24)
self.mypath = mypath
if(type(self.p)==pd.DataFrame):
self.parts={"google doc":"google doc"}
else:
self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
#txtdisabl = False
assemblies = []
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
#parts = findPartsListsDict(os.path.join(mypath,"partslist"))
self.loadFIleList = widgets.Dropdown(
options=oplist,
#value=2,
layout=self.lsblay,
description='',
)
self.loadbut = widgets.Button(
description='Load',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.rsblay,
tooltip='Click to load an existing file',
)
self.listEverything = widgets.Checkbox(
value=False,
description='List all parts',
disabled=False
)
self.fname1 = widgets.Text(
value="untitled",
placeholder = "type something",
description='Assembly File Name:',
layout=self.textlay,
disabled=False
)
self.DestWell = widgets.Text(
value="A1",
placeholder = "type something",
description='Dest Well:',
layout=self.Vboxlay,
disabled=True
)
self.AddCols = widgets.IntText(
value=0,
placeholder = "type something",
description='Extra Cols:',
layout=self.Vboxlay,
#disabled=True
)
self.drop2 = widgets.Dropdown(
options=self.parts,
width=100,
#value=2,
description='parts list:',
layout=self.textlay,
)
#print(self.drop2.style.keys)
self.but = widgets.Button(
description='New...',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.sblay,
tooltip='Click to start adding assemblies',
#icon='check'
)
self.finbut = widgets.Button(
description='Save!',
disabled=True,
button_style='warning',#, 'danger' or ''
layout=self.sblay,
tooltip='Finish and Save',
#icon='check'
)
self.but.on_click(self.on_button_clicked)
self.finbut.on_click(self.finishAndSave)
self.loadbut.on_click(self.loadFile_clicked)
self.listEverything.observe(self.on_listEverything_changed,names='value')
self.cbox = widgets.HBox([
widgets.VBox([self.fname1,widgets.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
widgets.VBox([self.drop2,widgets.HBox([self.DestWell,self.AddCols])]),\
widgets.VBox([self.but,self.finbut],layout=self.Vboxlay)])
display(self.cbox)
def add_row(self,b):
thisrow = int(b.tooltip[4:])
self.addWidgetRow(labonly=False,copyrow=thisrow)
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#b.disabled=True
#print(b)
def remove_row(self,b):
thisrow = int(b.tooltip[4:])
#outcolnum=0
cleared = False
for colnum in list(range(len(self.outitems))[:-3])\
+[len(self.outitems)-2]:
pvalue = self.outitems[colnum][thisrow].value
if(pvalue != ""):
cleared = True
self.outitems[colnum][thisrow].value = ""
if(cleared):
return
for colnum in range(len(self.outitems)):
self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
self.outitems[colnum][thisrow+1:]
#outcolnum +=1
newbutcol = []
newrow = 0
for a in self.outitems[-1]:
#print(a)
try:
a.children[0].tooltip = "row "+str(newrow)
a.children[1].tooltip = "row "+str(newrow)
if(len(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
newrow +=1
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#print(b)
def generateOptionsList(self,df,colname,prevval=None,listmode=0):
"""come up with a list of options given a column name. This contains
a ton of specific code"""
oplist = []
if(listmode == 1 and colname != "enzyme"):
oplist = sorted(list(df.part))+[""]
else:
if("vector" in colname):
oplist = sorted(list(df[(df.type=="UNS")|\
(df.type=="vector")].part))+[""]
elif(colname=="enzyme"):
oplist =enlist
if(prevval == ""):
prevval = enlist[0]
else:
oplist = sorted(list(df[df.type==colname].part))+[""]
if(not (prevval in oplist)):
oplist+=[prevval]
return oplist,prevval
def on_listEverything_changed(self,change):
"""this triggers when you change the value of "listEverything".
Here we want to change the values in the drop down to correspond to
either
(a) surrounding parts or
(b) the appropriate category
"""
self.updatePartOptions(None)
"""
typewewant = type(widgets.Dropdown())
#this means we checked the box. Now change drop box's options
for col in self.outitems:
for item in col:
if(type(item)==typewewant):
oplist,pval = self.generateOptionsList(self.p,\
col[0].value,item.value,change['new'])
item.options=oplist
item.value=pval
#"""
def loadFile_clicked(self,b):
"""loads a file from memory, instead of making a brand new one!"""
self.on_button_clicked(b,loadFile=self.loadFIleList.value)
def on_button_clicked(self,b,loadFile=None):
"""start making the assembly! THis part loads the first row of parts
drop downs and populates them with options!"""
#txtdisabl = True
b.disabled=True
self.but.disabled = True
self.drop2.disabled = True
self.finbut.disabled = False
self.DestWell.disabled = False
self.AddCols.disabled = True
self.loadFIleList.disabled=True
self.loadbut.disabled=True
if(loadFile!=None):
#this should read the file
self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
ftoload = pd.read_csv(loadFile).fillna('')
try:
ftoload = ftoload.drop('comment',axis=1)
except (ValueError,KeyError) as e:
#if this happens then 'comment' was already not there. great!
pass
self.AddCols.value=len(ftoload.columns)-9
if(not(type(self.p)==pd.DataFrame)):
dfs = pd.read_excel(self.drop2.value,None)
sheetlist = list(dfs.keys())
self.p = pd.DataFrame.append(dfs["parts_1"],dfs["Gibson"])
self.collabels = ["vector1","promoter","UTR","CDS","Terminator","vector2","enzyme","name",""]
if(self.AddCols.value>0):
newclabeld = self.collabels
for x in range(self.AddCols.value):
newclabeld=newclabeld[:-4]+["newcol"+str(x+1)]+newclabeld[-4:]
self.collabels = newclabeld
self.outitems = []
self.addWidgetRow(labonly=True)
if(loadFile==None):
self.addWidgetRow(labonly=False)
else:
#print(loadFile)
findex = ftoload.index
first = True
for findex in ftoload.index:
dfrow = ftoload.iloc[findex]
currow = list(dfrow)
if(first):
self.DestWell.value=dfrow.targwell
#extracols =
#startpos =
first=False
currow = list(dfrow.drop(['targwell','name','enzyme']))\
+[dfrow.enzyme]+[dfrow["name"]]
self.addWidgetRow(labonly=False,copyrow=currow)
#self.updatePartOptions()
#readindex = ftoload.index()
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet=widgets.HBox(outcols)
display(self.bigSheet)
def updatePartOptions(self,b=None):
"""update the options available to each drop down, according to what
values are chosen in the other drop downs. For example, only allow
parts which are compatible"""
if(self.holdup):
return
self.holdup=True
maxcols = len(self.outitems)-3
for colnum in range(maxcols):
for itemnum in range(len(self.outitems[colnum]))[1:]:
curitem = self.outitems[colnum][itemnum]
leftitem = 0
rightitem = 0
if(colnum == 0):
leftitem = maxcols-1
else:
leftitem = colnum-1
if(colnum == maxcols-1):
rightitem = 0
else:
rightitem=colnum+1
leftoverhang = ""
rightoverhang = ""
leftvalue = self.outitems[leftitem][itemnum].value
rightvalue = self.outitems[rightitem][itemnum].value
logiclist = np.array([True]*len(self.p))
if(leftvalue!=""):
try:
leftoverhang=self.p[self.p.part == leftvalue].right.iloc[0]
except IndexError:
#this means we didn't find the part!
raise ValueError("part {} has incorrect right overhang!".format(leftvalue))
if((self.outitems[-3][itemnum].value!='gibson') \
and ('UNS' in leftoverhang)):
pass
else:
logiclist &= (self.p.left==leftoverhang)
#print(leftoverhang)
if(rightvalue!=""):
try:
rightoverhang=self.p[self.p.part == rightvalue].left.iloc[0]
except IndexError:
raise ValueError("part {} has incorrect right overhang!".format(rightvalue))
if((self.outitems[-3][itemnum].value!='gibson') \
and ('UNS' in rightoverhang)):
pass
else:
logiclist &= (self.p.right==rightoverhang)
#print(rightoverhang)
#print("this part wants {} and {}".format(leftoverhang,rightoverhang))
self.holdup=True
prevval = curitem.value
oplist,newval = self.generateOptionsList(self.p[logiclist],\
self.outitems[colnum][0].value,\
prevval,self.listEverything.value)
curitem.options = oplist
curitem.value = newval
self.holdup=False
def incrementWellPos(self,position):
"""increments a 384 well plate location such as A1 to the next logical
position, going left to right, top to bottom"""
poslet = self.PlateLetters.index(position[0])
posnum = int(position[1:])
newposlet = poslet
newposnum = posnum+1
if(newposnum > self.PlateRowsCols[1]):
newposnum-=self.PlateRowsCols[1]
newposlet+=1
newposition = self.PlateLetters[newposlet]+str(newposnum)
return newposition
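    #e.g. "A1" -> "A2", ... , "A24" wraps to "B1" on the 16x24 (384-well) layout.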
def finishAndSave(self,b):
outfiletext = ",".join(self.collabels[:-1]+["targwell"])+"\n"
outfname = self.fname1.value+".csv"
startPos = self.DestWell.value
curpos = startPos
for i in range(len(self.outitems[0]))[1:]:
outlst = []
for nam,col in zip(self.collabels,self.outitems):
if(nam != ""):
outlst+=[col[i].value]
outlst+=[curpos]
curpos = self.incrementWellPos(curpos)
outfiletext+=",".join(outlst)+"\n"
with open(os.path.join(self.mypath,"assemblies",outfname),"w") as outfle:
outfle.write(outfiletext)
assemfpath = os.path.join(self.mypath,"assemblies",outfname)
#print("wrote {}".format())
display(FileLink(assemfpath))
display(pd.read_csv(os.path.join(self.mypath,"assemblies",outfname)))
#b.disabled=True
def addWidgetRow(self,labonly=True,copyrow=None):
outcolnum=0
for col in self.collabels:
if(labonly):
interwidg = widgets.Label(col)
else:
if(col=="name"):
newname = ""
#print(copyrow)
if(type(copyrow)==list):
newname = copyrow[outcolnum]
elif(type(copyrow)==int):
oldname = self.outitems[outcolnum][copyrow].value
newname = incrementString(oldname)
interwidg = widgets.Text(\
layout=self.ddlay,\
value=str(newname))
elif(col==""):
but1 = widgets.Button(\
description='+',
button_style='success',
tooltip='row '+str(len(self.outitems[0])-1),
layout=self.eblay
)
but2 = widgets.Button(\
description='-',
button_style='danger',
tooltip='row '+str(len(self.outitems[0])-1),
layout=self.eblay,
#disabled=disbut
)
but1.on_click(self.add_row)
but2.on_click(self.remove_row)
interwidg =widgets.HBox([but1,but2])
else:
oplist = []
prevval = ""
if(type(copyrow)==int):
prevval = self.outitems[outcolnum][copyrow].value
elif(type(copyrow)==list):
prevval = copyrow[outcolnum]
oplist, prevval = self.generateOptionsList(self.p,col,\
prevval,self.listEverything.value)
#print(oplist)
#print("value is")
#print(prevval)
interwidg = widgets.Dropdown(\
options=oplist,\
value=prevval,\
layout=self.ddlay)
interwidg.observe(self.updatePartOptions,names='value')
try:
self.outitems[outcolnum]+=[interwidg]
except IndexError:
self.outitems+=[[interwidg]]
outcolnum +=1
self.updatePartOptions()
for a in self.outitems[-1]:
try:
if(len(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
def make_assembly_file(mypath=".",externalDF = None):
"""this function will assist the user with making assembly .csv files!"""
x=assemblyFileMaker(mypath=mypath,partsdf=externalDF)
def process_assembly_file(mypath=".",printstuff=True,partsdf=None,annotateDF=None):
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
if(type(partsdf)==pd.DataFrame):
parts = {"google doc":"google doc"}
else:
parts = findPartsListsDict(os.path.join(mypath,"partslist"))
drop1 = widgets.Dropdown(
options=oplist,
#value=2,
description='Assembly:',
)
drop2 = widgets.Dropdown(
options=parts,
#value=2,
description='parts list:',
)
but = widgets.Button(
description='Select',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
#icon='check'
)
#button = widgets.Button(description="Click Me!")
#display(button)
#print(oplist)
def on_button_clicked(b):
pbar = widgets.FloatProgress(
min=0,
max=1.0
)
display(pbar)
if(drop1.value[-4:]=="xlsx" or drop1.value[-3:]=="xls"):
x=pd.read_excel(drop1.value)
else:
x=pd.read_csv(drop1.value)
if(type(partsdf)==pd.DataFrame):
p = partsdf
else:
dfs = pd.read_excel(drop2.value,None)
#print(drop1.value)
sheetlist = list(dfs.keys())
p = pd.DataFrame.append(dfs["parts_1"],dfs["Gibson"])
makeEchoFile(p,x,fname = drop1.value, \
output = os.path.join(mypath,"output","output.csv"),\
sepfilename=os.path.join(mypath,"output","outputLDV.csv"),\
printstuff=printstuff,progbar=pbar,mypath=mypath,annotateDF=annotateDF)
#print(drop1.value+" and "+drop2.value)
but.on_click(on_button_clicked)
cbox = widgets.HBox([drop1,drop2,but])
display(cbox)
#def fixPart(partseq,enz="BsaI",circ=True,end5p=0,end3p=0,goodends=ENDDICT):
def drawConstruct(ax,construct,dnaline=3,dnascale=2,annotateDF=None,schematic=True,labels='off',showscars=0):
"""creates a dnaplotlib image of a construct in dnaseqrecord format!"""
def substring_indexes(substring, string):
"""
Generate indices of where substring begins in string
>>> list(find_substring('me', "The cat says meow, meow"))
[13, 19]
"""
last_found = -1 # Begin at -1 so the next position to search from is 0
while True:
# Find next index of substring, by starting after its last known position
last_found = string.find(substring, last_found + 1)
if last_found == -1:
break # All occurrences have been found
yield last_found
dr = dpl.DNARenderer(scale = dnascale,linewidth=dnaline)
part_renderers = dr.SBOL_part_renderers()
conlist = []
if(type(annotateDF)==pd.DataFrame):
str_conseq = str(construct.seq).lower()
#print("annotating!")
#now we annotate the plasmid!!
for feature_index in annotateDF.index:
fname = annotateDF.iloc[feature_index]["name"]
#iterate through all the features and see if they are in our sequence
#but the problem is that it could be circular
featseq = annotateDF.iloc[feature_index].sequence.lower()
colorstr = annotateDF.iloc[feature_index].colorlist
colorstr2 = annotateDF.iloc[feature_index].colorlist2
#print(featcolor)
feattype = annotateDF.iloc[feature_index].type
featlen = len(featseq)
#print(featcolor)
if(featseq[-3:]=="..."):
featseq=featseq[:-3]
rcfeatseq = str(Dseq(featseq).rc()).lower()
#if(feattype == 'CDS'):
#print(featseq[:10]+"..."+featseq[-10:])
if(featseq in str_conseq):
#it could be in there multiple times
for featfound in substring_indexes(featseq,str_conseq):
#every time we find the feature...
construct.add_feature(featfound,featfound+featlen,seq=None,type=feattype,label=fname,strand=1 )
construct.features[-1].qualifiers["color"]=colorstr
construct.features[-1].qualifiers["color2"]=colorstr2
if(rcfeatseq in str_conseq):
for featfound in substring_indexes(rcfeatseq,str_conseq):
#every time we find the feature...
construct.add_feature(featfound,featfound+featlen,seq=None,type=feattype,label=fname ,strand=-1)
construct.features[-1].qualifiers["color"]=colorstr
construct.features[-1].qualifiers["color2"]=colorstr2
if(schematic==False):
seqlen = len(construct)
sp = {'type':'EmptySpace', 'name':'base', 'fwd':True, \
'opts':{'x_extent':seqlen+10}}
design = [sp]
start,end = dr.renderDNA(ax,design,part_renderers)
sbol_featlist = []
flist = sorted(construct.features,key=lambda a: a.location.start)
for feature in flist:
#feature = a[1]
featname = feature.qualifiers["label"]
feattype = feature.type
if("color" in feature.qualifiers):
colorstr = feature.qualifiers["color"]
if(colorstr != "(255,255,255)" and not type(colorstr)==float):
#don't add pure white as a color
featcolor = tuple([float(a)/255.0 for a in colorstr[1:-1].split(",")])
else:
featcolor = None
else:
colorstr = None
featcolor = None
if("color2" in feature.qualifiers):
colorstr2 = feature.qualifiers["color2"]
if(colorstr2 != "(255,255,255)" and not type(colorstr2)==float):
#don't add pure white as a color
featcolor2 = tuple([float(a)/255.0 for a in colorstr2[1:-1].split(",")])
else:
featcolor2 = None
else:
colorstr2 = None
featcolor2 = None
#print(featcolor)
#print(feature.location)
loclist = [feature.location.start,feature.location.end]
if(loclist[1]<loclist[0]):
featstrand = False
else:
featstrand = True
if(feature.strand==-1):
featstrand = False
featstart = min(loclist)
featend = max(loclist)
featlen = featend-featstart
if(not schematic):
feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
'start':featstart,'end':featend,\
'opts':{'label':featname,'label_size':13,\
'label_y_offset':-5,'x_extent':featlen}}
else:
feat = {'type':feattype, 'name':featname, 'fwd':featstrand, \
#'start':featstart,'end':featend,\
'opts':{'label':featname,'label_size':13,\
'label_y_offset':-5}}
if(feattype == 'CDS'):
feat['opts']['x_extent']=30
if(not (featcolor == None) ):
#only add the color if it exists
feat['opts']['color']=featcolor
if(not (featcolor2 == None) ):
#only add the color if it exists
feat['opts']['color2']=featcolor2
if(labels=="off"):
feat['opts']['label']=""
if(feattype == 'Scar' and not showscars):
pass
else:
sbol_featlist+=[feat]
if(schematic):
start,end = dr.renderDNA(ax,sbol_featlist,part_renderers)
else:
for feat in sbol_featlist:
dr.annotate(ax,part_renderers,feat)
if(not construct.linear):
vheight = 5
curves = (end-start)*.05
plasmid = FancyBboxPatch((start-curves, -vheight*2), \
(end-start)+(end-start)*.1+curves*2, vheight*2,\
fc="none",ec="black", linewidth=dnaline, \
boxstyle='round,pad=0,rounding_size={}'.format(curves), \
joinstyle="round", capstyle='round',mutation_aspect=vheight/curves)
ax.add_patch(plasmid)
else:
curves = 0
ax.set_xlim([start-1.2*curves, end+1.2*curves+(end-start)*.1*(1-construct.linear)])
ax.set_ylim([-12,12])
#ax_dna.set_aspect('equal')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
def runProgram():
"""runs the process_assembly_file function with command line prompts.
Probably doesn't work"""
#x=pd.read_csv(insheet,sep=",")
#pickhand = raw_input("is this for the echo? (y/n)")
pickhand = 'y'
xl_file=pickPartsList()
x,fname=pickAssembly()
#enz=pickEnzyme()
#p=pd.read_csv("partslist/CIDAR_parts_plate_ASS.csv",sep=",")
#pd.ExcelFile("partslist/CIDAR_parts_plate_ASS.xlsx")
dfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
sheetlist = list(dfs.keys())
p = | pd.DataFrame.append(dfs["parts_1"],dfs["Gibson"]) | pandas.DataFrame.append |
import pandas as pd
import numpy as np
import sys
import traceback
from tqdm.auto import tqdm
import os
import csv
import git
import sys
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
def get_date(x):
return '-'.join(x.split('-')[:3])
def get_fips(x):
return x.split('-')[-1]
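#submission ids look like "<date>-<fips>"; a quick check with a made-up id:
assert get_date("2020-04-01-36061") == "2020-04-01"
assert get_fips("2020-04-01-36061") == "36061"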
def pinball_loss(y_true, y_pred, quantile = 0.5):
delta = y_true - y_pred
loss_above = np.sum(delta[delta > 0])*(quantile)
loss_below = np.sum(-1*delta[delta < 0])*(1-quantile)
return (loss_above + loss_below) / len(y_true)
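#tiny worked example (made-up numbers): at the median (quantile 0.5) each unit of
#error costs 0.5, so y_true=[2,0] against y_pred=[1,1] scores (0.5+0.5)/2 = 0.5.
assert pinball_loss(np.array([2,0]), np.array([1,1]), 0.5) == 0.5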
def pinball_loss2(y_true, y_pred, size, quantile = 0.5):
delta = y_true - y_pred
if delta > 0:
loss = delta*quantile
else:
loss = -1*delta*(1-quantile)
return loss / size
def evaluate(test_df, user_df):
join_df = test_df.join(user_df, how = 'inner')
if(len(join_df) != len(test_df)):
sys.stderr.write("Submission not right length. \n")
raise Exception("Submission not right length")
if(user_df.isna().sum().sum() > 0 ):
sys.stderr.write("Submission contains NaN. \n")
raise Exception("Submission Contains NaN.")
if(join_df.index.equals(test_df.index) == False):
sys.stderr.write("Incorrect ID format in Submission. \n")
raise Exception("Incorrect ID format.")
total_loss = 0
for column in ['10','20','30','40','50', '60', '70', '80', '90']:
quantile = int(column) / 100.0
loss = pinball_loss(join_df['deaths'].values, join_df[column].values, quantile) / 9.0
total_loss += loss
return total_loss
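#minimal scoring sketch (made-up id and numbers): a submission that hits the true
#death count at every quantile has a pinball loss of exactly zero.
_truth = pd.DataFrame({'deaths':[3.0]}, index=['2020-04-01-36061'])
_sub = pd.DataFrame({c:[3.0] for c in ['10','20','30','40','50','60','70','80','90']},
                    index=['2020-04-01-36061'])
assert evaluate(_truth,_sub) == 0.0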
def evaluate2(test_df, user_df):
county_losses = {}
join_df = test_df.join(user_df, how = 'inner')
if(len(join_df) != len(test_df)):
sys.stderr.write("Submission not right length. \n")
raise Exception("Submission not right length")
if(user_df.isna().sum().sum() > 0 ):
sys.stderr.write("Submission contains NaN. \n")
raise Exception("Submission Contains NaN.")
if(join_df.index.equals(test_df.index) == False):
sys.stderr.write("Incorrect ID format in Submission. \n")
raise Exception("Incorrect ID format.")
total_loss = 0
size = len(join_df['deaths'].values)
for index, row in join_df.iterrows():
county = index.split('-')[-1]
county_loss = 0
for column in ['10','20','30','40','50', '60', '70', '80', '90']:
quantile = int(column) / 100.0
# if county == '36061':
# print(f"{row[column]} versus {row['deaths']}")
loss = pinball_loss2(row['deaths'], row[column], size, quantile) / 9.0
county_loss += loss
total_loss += loss
if county in county_losses.keys():
county_losses[county] += county_loss
else:
county_losses[county] = county_loss
return total_loss, county_losses
def evaluator(submission, start_date):
print(f"scoring {submission}")
daily_df = pd.read_csv(f"{homedir}" + '/data/us/covid/nyt_us_counties_daily.csv')
# daily_df = pd.read_csv(f"{homedir}" + '/data/us/covid/nyt_us_counties.csv')
daily_df.loc[daily_df["county"]=='New York City', "fips"]=36061
daily_df.dropna(subset=['fips'], inplace=True)
daily_df['fips'] = daily_df['fips'].astype(int)
end_date = daily_df['date'].max()
daily_df['id'] = daily_df['date'] +'-'+ daily_df['fips'].astype(str)
preperiod_df = daily_df[(daily_df['date'] < start_date)]
daily_df = daily_df[(daily_df['date'] <= end_date) & (daily_df['date'] >= start_date)]
sample_submission = pd.read_csv(f"{homedir}"+ '/sample_submission.csv') # Load the sample submission with all 0's
sample_submission['date'] = sample_submission['id'].apply(get_date)
sample_submission['fips'] = sample_submission['id'].apply(get_fips).astype('int')
sample_submission = sample_submission[(sample_submission['date'] <= end_date) & (sample_submission['date'] >= start_date)]
# Disabled FIPS is a set of FIPS to avoid scoring. Covid_active_fips is where there has been reports of covid,
# and inactive_fips are fips codes present in sample submission but with no cases reported by the New York Times.
# New_active_fips are FIPS that were introduced into the dataset during the scoring period.
# Active FIPS should be scored against deaths data from NYT if such data is available,
# but Inactive FIPS should be scored with a target of 0.
disabled_fips = set({
## NEW YORK
36005, 36047, 36081, 36085,
        ## Puerto Rico
72001, 72003, 72005, 72007, 72009, 72011, 72013, 72015, 72017,
72019, 72021, 72023, 72025, 72027, 72029, 72031, 72033, 72035,
72037, 72039, 72041, 72043, 72045, 72047, 72049, 72051, 72053,
72054, 72055, 72057, 72059, 72061, 72063, 72065, 72067, 72069,
72071, 72073, 72075, 72077, 72079, 72081, 72083, 72085, 72087,
72089, 72091, 72093, 72095, 72097, 72099, 72101, 72103, 72105,
72107, 72109, 72111, 72113, 72115, 72117, 72119, 72121, 72123,
72125, 72127, 72129, 72131, 72133, 72135, 72137, 72139, 72141,
72143, 72145, 72147, 72149, 72151, 72153,
## Virgin Islands
78010, 78020, 78030})
prev_active_fips = set(preperiod_df.fips.unique())
curr_active_fips = set(daily_df.fips.unique())
all_fips = set(sample_submission.fips.unique())
covid_active_fips = prev_active_fips.intersection(all_fips).intersection(curr_active_fips) - disabled_fips
inactive_fips = all_fips - prev_active_fips - curr_active_fips - disabled_fips
new_active_fips = (curr_active_fips - prev_active_fips).intersection(all_fips) - disabled_fips
# Create a DataFrame of all 0's for inactive fips by getting those from sample submission.
inactive_df = sample_submission.set_index('fips')[['id','50']].loc[inactive_fips]
inactive_df = inactive_df.set_index('id').rename({'50':'deaths'}, axis = 1)
assert(inactive_df.sum().sum() == 0)
# Create a DataFrame of active fips from the New York Times data
active_df = daily_df.set_index('fips')[['id', 'deaths']].loc[covid_active_fips].set_index('id')
# Create dataframe for new fips
sample_search = sample_submission.set_index('fips')[['id','50']].rename({'50':'deaths'}, axis = 1)
daily_search = daily_df.set_index('fips')
new_df_arr = []
for fips in new_active_fips:
tmp_sample = sample_search.loc[[fips]].set_index('id')
tmp_daily = daily_search.loc[[fips]].set_index('id')
tmp_sample.update(tmp_daily)
tmp_sample = tmp_sample[tmp_sample.index <= tmp_daily.index.max()]
new_df_arr.append(tmp_sample)
# Join the data frames
example = None
if(len(new_active_fips) > 0):
new_df = pd.concat(new_df_arr)
example = pd.concat([inactive_df, active_df, new_df]).sort_index()
else:
example = pd.concat([inactive_df, active_df]).sort_index()
# Read some CSV for score
df = | pd.read_csv(submission) | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pandas as pd
def re_split_col(arr):
pattern = re.compile(r'(\d+)')
ret = [pattern.split(string) for string in arr]
data = [[str_list[0], ''.join(str_list[0:3]), ''.join(str_list[3:])] for str_list in ret]
data = np.array(data)
print(data)
data = pd.DataFrame(data=data)
return data
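#example of the split with a made-up device name: for "pump12v3" the regex split
#gives ['pump','12','v','3',''], so the output row becomes ['pump','pump12v','3'].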
def main():
data = | pd.read_csv('./decice_name.csv') | pandas.read_csv |
"""<2018.07.24>"""
import pandas as pd
import numpy as np
s= pd.Series([9904312,3448737,2890451,2466052],index=["Seoul","Busan","Incheon","Daegue"])
#print(s)
#print(s.index)
#print(s.values)
#s.name="인구"
#s.index.name="도시"
#print(s.index.name)
#Operations on a Series are applied only to its values
#print(s/100000)
#print(s[(250e4<s)&(s<500e4)])
#Be careful: in pandas, label-based slicing includes the end value.
#print(s[:3])
#s0=pd.Series(range(3),index=["a","b","c"])
#print(s0)
#print("서울" in s)
#for k,v in s.items():
# print("%s=%d"%(k,v))
s2=pd.Series({"Seoul":9631482,"Busan":3393191,"Incheon":2632035,"Daejoen":1490158})
#print(s2)
#Dictionary elements have no order, so the order of the Series data is not guaranteed either.
#If a specific order is wanted, the index must be given as a list.
s2=pd.Series({"Seoul":9631482,"Busan":3393191,"Incheon":2632035,"Daejeon":1490158},
index=["Busan","Seoul","Incheon","Daejeon"])
#print(s2)
"""인덱스 기반 연산"""
ds=s-s2
#print(ds)
#print(s.values-s2.values)
#print(ds.notnull())
#print(ds[ds.notnull()])
#rs=(s-s2)/s2*100
#rs=rs[rs.notnull()]
#print(rs)
"""데이터 수정"""
#rs["Busan"]=1.63
#print(rs)
##데이터 추가
#rs["Daegue"]=1.41
#print(rs)
##데이터 삭제
#del rs["Seoul"]
#print(rs)
#volleyball=pd.Series({"receive":76.1,"spike":42.7,"toss":65.3,"dig":22.7,"attack":52.3,"defense":42.75},
# index=["attack","spike","defense","dig","receive","toss"])
#volleyball.name="KEPCO"
#print(volleyball)
#soccer=pd.Series({"pass":65.2,"counterattack":24.5,"defense":67.2,"attack":45.2,"shot":42.2,"tackle":12.4},
# index=["attack","counterattack","shot","pass","defense","tackle"])
#soccer.name="Mancity"
#print(soccer)
#log=volleyball-soccer
#print(log)
"""데이터프레임 클래스"""
data={
"2015": [9904312, 3448737, 2890451, 2466052],
"2010": [9631482, 3393191, 2632035, 2431774],
"2005": [9762546, 3512547, 2517680, 2456016],
"2000": [9853972, 3655437, 2466338, 2473990],
"지역":["수도권","경상권","수도권","경상권"],
"2010-2015 증가율":[0.0283,0.0163,0.0982,0.0141]
}
columns=["지역","2015","2010","2005","2000","2010-2015 증가율"]
index=["서울","부산","인천","대구"]
df=pd.DataFrame(data,index=index,columns=columns)
#print(df)
#Attach names to the column index and the row index
df.index.name="도시"
df.columns.name="특성"
#print(df)
result={
"Point":[100,81,77,75,70],
"Win":[32,25,23,21,21],
"Draw":[4,6,8,12,7],
"Lose":[2,7,7,5,10],
"Goal difference":[79,40,38,46,24]}
items=["Point","Win","Draw","Lose","Goal difference"]
Team_name=["MCI","MUN","TOT","LIV","CHE"]
league=pd.DataFrame(result,index=Team_name,columns=items)
#print(league)
#A DataFrame can be transposed by appending .T
#print(league.T)
#print(league[["Win","Draw","Lose"]])
df2=pd.DataFrame(np.arange(12).reshape(3,4))
#print(df2)
df["2010-2015 증가율"]=df["2010-2015 증가율"]*100
#print(df)
#print(df[1:3])
data={
"Korea":[80,90,70,30],
"English":[90,70,60,40],
"Math":[90,60,80,70],}
columns=["Korea","English","Math"]
index=["Kim","Lee","Park","Choi"]
df=pd.DataFrame(data,columns=columns,index=index)
#print(df)
#1. Show every student's math score as a Series.
#print(df[["Math"]])
#2. Show every student's Korean and English scores as a DataFrame.
#print(df[["English","Korea"]])
#3. Add every student's average score across subjects as a new column.
#axis=1 means the mean is computed across each row
avg=df.mean(axis=1)
df["Average"]=avg
#print(df)
#4. Change Choi's English score to 80 and recompute the average scores.
#df.loc["Choi","English"]=80
#print(df)
#avg=df.mean(axis=1)
#df["Average"]=avg
#print(df)
#Problems still to be solved:
#Show Kim's scores as a DataFrame.
#print(df.iloc[0])
#Show Park's scores as a Series.
#print(df.iloc[2])
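#A possible solution (sketch): double brackets keep the DataFrame shape, a single label returns a Series.
#print(df.loc[["Kim"]]) # Kim's scores as a one-row DataFrame
#print(df.loc["Park"]) # Park's scores as a Series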
"""데이터프레임 인덱서"""
box=pd.DataFrame(np.arange(10,22).reshape(3,4),
index=["r1","r2","r3"],
columns=["c1","c2","c3","c4"])
#print(box)
"""loc인덱서"""
#df.loc[행인덱스(row),열인덱스(column)]와 같은 형태로 사용한다.
#print(box.loc["r1","c2"])
#print(box.loc["r1":,"c3"])
#print(box.loc["r2":,"c2":])
#Extract only the rows that match a condition
#print(box.loc[box.c1>10])
#print(box.loc["r1",:])
#print(box[:1])
#Adding a column
#box["c5"]=[14,18,22]
#print(box)
#Adding a row
#box.loc["r4"]=[90,91,92,93,94]
#print(box)
#Adding / removing a row
#box.loc["r5"]=[100,101,102,103,104]
#print(box)
#box=box.drop("r5")
#print(box)
box2=pd.DataFrame(np.arange(10,26).reshape(4,4),
columns=np.arange(1,8,2))
#print(box2)
#print(box2.loc[1,1])
#print(box2.loc[1:2,:])
"""iloc인덱서"""
#정수 인덱스만 방는다
#box의 0행 1열 데이터
#print(box.iloc[0,1])
#print(box.iloc[:2,2])
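#Negative positions count from the end, e.g. the last row of box:
#print(box.iloc[-1,:])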
"""<2018.07.25>"""
"""데이터 갯수 세기"""
#10행의 데이터 생성
s=pd.Series(range(10))
#3번 인덱스에 NAN 생성
s[3]=np.nan
#print(s)
#count does not include NaN values in the count.
#print("Number of non-NaN values in s: {}".format(s.count()))
np.random.seed(2)
df=pd.DataFrame(np.random.randint(5,size=(4,4)),dtype=float)
df.iloc[2,3]=np.nan
#print(df)
#Counts are computed separately for each column, so missing data can be spotted.
#print(df.count())
"""연습 문제 1
다음 명령으로 타이타닉호 승객 데이터를 데이터프레임으로 읽어온다. 이 명령을 실행하려면 seaborn 패키지가 설치되어 있어야 한다.
import seaborn as sns
titanic = sns.load_dataset("titanic")
타이타닉호 승객 데이터의 데이터 값을 각 열마다 구해본다.
"""
import seaborn as sns
titanic=sns.load_dataset("titanic")
#print(titanic["age"].value_counts())
#print(titanic.head())
#print(titanic.count())
"""카테고리 값 세기"""
np.random.seed(1)
s2=pd.Series(np.random.randint(6,size=100))
#print(s2)
#tail() shows only the last few entries
#print(s2.tail())
# When a Series holds integer, string, or categorical values, value_counts() reports how many of each value exist.
#print(s2.value_counts())
"""정렬"""
#인덱스 기준 정렬
#print(s2.value_counts().sort_index())
#Value 기준 정렬
#print(s2.value_counts().sort_values())
#NaN값이 있는 경우에는 정렬하면 NAN값이 가장 나중에 나온다.
ran=pd.Series(range(10))
ran[8]=np.nan
#print(ran)
#print(ran.sort_values())
#To sort from largest to smallest, pass ascending=False
#print(ran.sort_values(ascending=False))
#When using sort_values on a DataFrame, the by argument specifies the column(s) to sort by.
#print(df.sort_values(by=1))
#print(df.sort_values(by=[1,2]))
"""
연습 문제 2
타이타닉호 승객중 성별(sex) 인원수, 나이별(age) 인원수, 선실별(class) 인원수, 사망/생존(alive) 인원수를 구하라.
"""
#print("Titanic의 탑승객 성별 구성은 {}".format(titanic["sex"].value_counts()))
#print("Titanic의 탑승객 연령별 구성은 {}".format(titanic["age"].value_counts().head()))
#print("Titanic의 선실별 인원 구성은 {}".format(titanic["class"].value_counts()))
#print("Titanic의 생존 인원수는 {}".format(titanic["alive"].value_counts()))
"""행/열 합계"""
#df2=pd.DataFrame(np.random.randint(10,size=(4,8)))
#print(df2)
##행별로 합계 구하기
#print(df2.sum(axis=1))
##열별로 합계 구하기
#print(df2.sum(axis=0))
#print(df2.sum())
#df2["RowSum"]=df2.sum(axis=1)
#print(df2)
#df2.loc["ColTotal",:]=df2.sum()
#print(df2)
"""apply변환"""
#행이나 열 단위로 더 복잡한 처리를 하고 싶을 때는 apply 메서드를 사용한다.
#인수로 행 또는 열 을 받는 함수를 apply 메서드의 인수로 넣으면 각 열(또는 행)을 반복하여 그 함수에 적용시킨다.
df3=pd.DataFrame({
'A':[1,3,4,3,4],
'B':[2,3,1,2,3],
'C':[1,5,2,4,4]
})
#print(df3)
#To get the difference between each column's max and min, pass a lambda function like the one below.
#print("Max-min difference per column \n{}".format(df3.apply(lambda x:x.max()-x.min())))
#To apply it per row instead, use the axis=1 argument.
#print("Max-min difference per row \n{}".format(df3.apply(lambda x:x.max()-x.min(),axis=1)))
#To see how often each value occurs in each column, pass the value_counts function.
#print(df3.apply(pd.value_counts))
#NaN values can be replaced with a desired value using the fillna method.
#The astype method can also change the data type of the whole frame.
#print(df3.apply(pd.value_counts).fillna(0).astype(int))
"""실수 값을 카테고리 값으로 변환(일정 범위에 데이터 넣기)"""
#cut:실수 값의 경계선을 지정하는 경우
#qcut:갯수가 똑같은 구간으로 나누는 경우
ages=[0,2,10,21,23,37,61,20,41,32,100]
bins=[1,15,25,35,60,99]
labels=["미성년자","청년","중년","장년","노년"]
cats=pd.cut(ages,bins,labels=labels)
#print(cats)
df4=pd.DataFrame(ages,columns=["ages"])
df4["age_cat"]=pd.cut(df4.ages,bins,labels=labels)
#print(df4)
#qcut does not take bin edges; it splits the data into the requested number of bins so that each bin holds the same count.
#For example, the next lines split 1000 values into 4 bins, each holding 250 values.
data=np.random.randn(1000)
cats=pd.qcut(data,4,labels=["Q1","Q2","Q3","Q4"])
#print(cats)
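#A quick sanity check of the equal-sized bins (illustrative):
#print(pd.value_counts(cats)) # each of Q1..Q4 should hold 250 values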
#Just checking which columns this dataset has
#print(titanic.count())
#How to extract rows that satisfy a condition!!!
#old=titanic[titanic["age"]>40]
#print(old)
#Split into survivors and the deceased
alive=titanic[titanic["alive"]=='yes']
dye=titanic[titanic["alive"]=='no']
#Check the number of survivors and the deceased
#print("alive:{} dye:{}".format(len(alive),len(dye)))
age_group=[1,19,30,40,60,99]
level=["미성년자","청년층","중년","장년","노년"]
alive_clf=pd.cut(alive["age"],age_group,labels=level)
#print("<Titanic passengers per age group>\n{}".format(titanic_clf))
titanic_clf= | pd.cut(titanic["age"],age_group,labels=level) | pandas.cut |
#!/usr/bin/env python
# coding: utf-8
# # SLIDING WINDOW SPLIT
# ### LOAD LIBRARIES
# In[ ]:
import os
import gc
import warnings
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
register_matplotlib_converters()
sns.set()
# ### GLOBAL VARIABLES
# In[ ]:
INPUT_PATH = '../../data/features'
OUTPUT_PATH = '../../data/train_test'
INPUT_FILE_NAME = 'features_v005'
N_SPLITS = 3 # number of folds
DAY_COL = 'd'
DATE_COL = "date"
D_THRESH = 1941 - int(365 * 2) # keep only the last 2 years of training data, from 2014-05-23 to 2016-05-24
DAYS_PRED = 28
# ### FUNCTIONS
# In[ ]:
class CustomTimeSeriesSplitter:
def __init__(self, n_splits=5, train_days=80, test_days=20, day_col="d"):
self.n_splits = n_splits
self.train_days = train_days
self.test_days = test_days
self.day_col = day_col
def split(self, X, y=None, groups=None):
SEC_IN_DAY = 3600 * 24
sec = (X[self.day_col] - X[self.day_col].iloc[0]) * SEC_IN_DAY
duration = sec.max()
train_sec = self.train_days * SEC_IN_DAY
test_sec = self.test_days * SEC_IN_DAY
total_sec = test_sec + train_sec
if self.n_splits == 1:
train_start = duration - total_sec
train_end = train_start + train_sec
train_mask = (sec >= train_start) & (sec < train_end)
test_mask = sec >= train_end
yield sec[train_mask].index.values, sec[test_mask].index.values
else:
# step = (duration - total_sec) / (self.n_splits - 1)
step = DAYS_PRED * SEC_IN_DAY
for idx in range(self.n_splits):
# train_start = idx * step
shift = (self.n_splits - (idx + 1)) * step
train_start = duration - total_sec - shift
train_end = train_start + train_sec
test_end = train_end + test_sec
train_mask = (sec > train_start) & (sec <= train_end)
if idx == self.n_splits - 1:
test_mask = sec > train_end
else:
test_mask = (sec > train_end) & (sec <= test_end)
yield sec[train_mask].index.values, sec[test_mask].index.values
def get_n_splits(self):
return self.n_splits
# In[ ]:
def show_cv_days(cv, X, dt_col, day_col):
for ii, (tr, tt) in enumerate(cv.split(X)):
print(f"----- Fold: ({ii + 1} / {cv.n_splits}) -----")
tr_start = X.iloc[tr][dt_col].min()
tr_end = X.iloc[tr][dt_col].max()
tr_days = X.iloc[tr][day_col].max() - X.iloc[tr][day_col].min() + 1
tt_start = X.iloc[tt][dt_col].min()
tt_end = X.iloc[tt][dt_col].max()
tt_days = X.iloc[tt][day_col].max() - X.iloc[tt][day_col].min() + 1
df = pd.DataFrame(
{
"start": [tr_start, tt_start],
"end": [tr_end, tt_end],
"days": [tr_days, tt_days],
},
index=["train", "test"],
)
display(df)
# In[ ]:
def plot_cv_indices(cv, X, dt_col, lw=10):
n_splits = cv.get_n_splits()
_, ax = plt.subplots(figsize=(20, n_splits))
# Generate the training/testing visualizations for each CV split
for ii, (tr, tt) in enumerate(cv.split(X)):
# Fill in indices with the training/test groups
indices = np.array([np.nan] * len(X))
indices[tt] = 1
indices[tr] = 0
# Visualize the results
ax.scatter(
X[dt_col],
[ii + 0.5] * len(indices),
c=indices,
marker="_",
lw=lw,
cmap=plt.cm.coolwarm,
vmin=-0.2,
vmax=1.2,
)
# Formatting
MIDDLE = 15
LARGE = 20
ax.set_xlabel("Datetime", fontsize=LARGE)
ax.set_xlim([X[dt_col].min(), X[dt_col].max()])
ax.set_ylabel("CV iteration", fontsize=LARGE)
ax.set_yticks(np.arange(n_splits) + 0.5)
ax.set_yticklabels(list(range(n_splits)))
ax.invert_yaxis()
ax.tick_params(axis="both", which="major", labelsize=MIDDLE)
ax.set_title("{}".format(type(cv).__name__), fontsize=LARGE)
return ax
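# In[ ]:
# Minimal usage sketch of CustomTimeSeriesSplitter (illustrative only; the real run uses the
# features file loaded further below). It reuses the DAY_COL/DATE_COL/DAYS_PRED globals.
_toy = pd.DataFrame({
    DAY_COL: np.arange(1, 121),
    DATE_COL: pd.date_range("2016-01-01", periods=120, freq="D"),
})
_toy_cv = CustomTimeSeriesSplitter(n_splits=2, train_days=56, test_days=DAYS_PRED, day_col=DAY_COL)
for _fold, (_tr, _tt) in enumerate(_toy_cv.split(_toy)):
    print(f"fold {_fold}: {len(_tr)} train rows, {len(_tt)} test rows")
# show_cv_days(_toy_cv, _toy, DATE_COL, DAY_COL) and plot_cv_indices(_toy_cv, _toy, DATE_COL)
# can be called the same way to inspect the fold boundaries.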
# In[ ]:
def reduce_mem_usage(df, verbose=False):
start_mem = df.memory_usage().sum() / 1024 ** 2
int_columns = df.select_dtypes(include=["int"]).columns
float_columns = df.select_dtypes(include=["float"]).columns
for col in int_columns:
df[col] = | pd.to_numeric(df[col], downcast="integer") | pandas.to_numeric |
# encoding: utf-8
from opendatatools.common import RestAgent
from progressbar import ProgressBar
import demjson
import json
import pandas as pd
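# fund_type maps fund-category display names (in Chinese) to the query parameters
# ("t", plus "lx" where applicable) that the EastMoney fund-list endpoint expects,
# e.g. "股票型基金" = equity funds, "债券型基金" = bond funds, "货币基金" = money-market funds.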
fund_type = {
"全部开放基金" : {"t": 1, "lx": 1},
"股票型基金" : {"t": 1, "lx": 2},
"混合型基金" : {"t": 1, "lx": 3},
"债券型基金" : {"t": 1, "lx": 4},
"指数型基金" : {"t": 1, "lx": 5},
"ETF联接基金" : {"t": 1, "lx": 6},
"LOF基金" : {"t": 1, "lx": 8},
"分级基金" : {"t": 1, "lx": 9},
"FOF基金" : {"t": 1, "lx": 15},
"理财基金" : {"t": 5},
"分级A" : {"t": 6},
"货币基金" : {"t": 7},
}
class EastMoneyAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def _get_and_parse_js(self, url, prefix, param=None):
response = self.do_request(url, param=param)
if not response.startswith(prefix):
return None
else:
return response[len(prefix):]
def get_fund_company(self):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?t=3'
prefix = 'var gs='
response = self._get_and_parse_js(url, prefix)
if response is None:
return None, '获取数据失败'
jsonobj = demjson.decode(response)
df = pd.DataFrame(jsonobj['op'])
df.columns = ['companyid', 'companyname']
return df, ''
def _get_fund_list_onepage(self, company='', page_no = 1, page_size = 100):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?page=%d,%d&gsid=%s' % (page_no, page_size, company)
prefix = 'var db='
        response = self._get_and_parse_js(url, prefix)
if response is None:
return None, '获取数据失败'
jsonobj = demjson.decode(response)
rsp = jsonobj['datas']
datestr = jsonobj['showday']
df = pd.DataFrame(rsp)
if len(df) > 0:
df.drop(df.columns[5:], axis=1, inplace=True)
df.columns = ['fundcode', 'fundname', 'pingyin', 'nav', 'accu_nav']
df['date'] = datestr[0]
return df, ''
else:
return None, ''
def get_fundlist_by_company(self, companyid):
page_no = 1
page_size = 1000
df_result = []
while True:
df, msg = self._get_fund_list_onepage(company=companyid, page_no=page_no, page_size=page_size)
if df is not None:
df_result.append(df)
if df is None or len(df) < page_size:
break
page_no = page_no + 1
if len(df_result) > 0:
return pd.concat(df_result), ''
else:
return None, ''
def get_fund_list(self):
df_company, msg = self.get_fund_company()
if df_company is None:
return None, msg
df_result = []
process_bar = ProgressBar().start(max_value=len(df_company))
for index, row in df_company.iterrows():
companyid = row['companyid']
companyname = row['companyname']
df, msg = self.get_fundlist_by_company(companyid)
if df is not None:
df['companyname'] = companyname
df['companyid'] = companyid
df_result.append(df)
process_bar.update(index+1)
return pd.concat(df_result), ''
def get_fund_type(self):
return fund_type.keys()
def _get_fundlist_by_type_page(self, type, page_no = 1, page_size = 100):
url = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?page=%d,%d' % (page_no, page_size)
prefix = 'var db='
type_param = fund_type[type]
response = self._get_and_parse_js(url, prefix, param=type_param)
jsonobj = demjson.decode(response)
rsp = jsonobj['datas']
datestr = jsonobj['showday']
df = pd.DataFrame(rsp)
if len(df) > 0:
df.drop(df.columns[5:], axis=1, inplace=True)
df.columns = ['fundcode', 'fundname', 'pingyin', 'nav', 'accu_nav']
df['date'] = datestr[0]
return df, ''
else:
return None, '获取数据失败'
def get_fundlist_by_type(self, type):
if type not in fund_type:
return None, '不正确的基金类型,请通过get_fund_type查询'
type_param = fund_type[type]
page_no = 1
page_size = 1000
df_result = []
while True:
df, msg = self._get_fundlist_by_type_page(type, page_no, page_size)
if df is not None:
df_result.append(df)
if df is None or len(df)< page_size:
break
page_no = page_no + 1
df = pd.concat(df_result)
df['fund_type'] = type
return df, ''
def get_fund_nav(self, fund_code):
url = 'http://api.fund.eastmoney.com/f10/lsjz'
self.add_headers({'Referer': 'http://fund.eastmoney.com/f10/jjjz_%s.html' % fund_code})
page_no = 1
page_size = 1000
df_result = []
while True:
data = {
'fundCode' : fund_code,
'pageIndex': page_no,
'pageSize' : page_size,
}
response = self.do_request(url, param=data)
jsonobj = json.loads(response)
err_code = jsonobj['ErrCode']
err_msg = jsonobj['ErrMsg']
if err_code != 0:
return None, err_msg
rsp = jsonobj['Data']['LSJZList']
df = | pd.DataFrame(rsp) | pandas.DataFrame |
""" test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
self.check_values(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
_check(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
class TestAtAndiAT:
# at and iat tests that don't need Base class
def test_float_index_at_iat(self):
ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in ser.items():
assert ser.at[el] == item
for i in range(len(ser)):
assert ser.iat[i] == i + 1
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range("1/1/2000", periods=8)
df = DataFrame(np.random.randn(8, 4), index=dates, columns=["A", "B", "C", "D"])
s = df["A"]
result = s.at[dates[5]]
xp = s.values[5]
assert result == xp
# GH 7729
# make sure we are boxing the returns
s = Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]")
expected = Timestamp("2014-02-02")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
s = Series(["1 days", "2 days"], dtype="timedelta64[ns]")
expected = Timedelta("2 days")
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
assert result == expected
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64")
result = s.iloc[2]
assert result == 2
result = s.iat[2]
assert result == 2
msg = "index 10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[10]
msg = "index -10 is out of bounds for axis 0 with size 5"
with pytest.raises(IndexError, match=msg):
s.iat[-10]
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype="int64")
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
assert result == 2
def test_frame_at_with_duplicate_axes(self):
# GH#33041
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
result = df.at[0, "A"]
expected = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.T.at["A", 0]
tm.assert_series_equal(result, expected)
# setter
df.at[1, "A"] = 2
expected = Series([2.0, 2.0], index=["A", "A"], name=1)
tm.assert_series_equal(df.iloc[1], expected)
def test_at_getitem_dt64tz_values(self):
# gh-15822
df = DataFrame(
{
"name": ["John", "Anderson"],
"date": [
Timestamp(2017, 3, 13, 13, 32, 56),
Timestamp(2017, 2, 16, 12, 10, 3),
],
}
)
df["date"] = df["date"].dt.tz_localize("Asia/Shanghai")
expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai")
result = df.loc[0, "date"]
assert result == expected
result = df.at[0, "date"]
assert result == expected
def test_mixed_index_at_iat_loc_iloc_series(self):
# GH 19860
s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
for el, item in s.items():
assert s.at[el] == s.loc[el] == item
for i in range(len(s)):
assert s.iat[i] == s.iloc[i] == i + 1
with pytest.raises(KeyError, match="^4$"):
s.at[4]
with pytest.raises(KeyError, match="^4$"):
s.loc[4]
def test_mixed_index_at_iat_loc_iloc_dataframe(self):
# GH 19860
df = DataFrame(
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2]
)
for rowIdx, row in df.iterrows():
for el, item in row.items():
assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item
for row in range(2):
for i in range(5):
assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i
with pytest.raises(KeyError, match="^3$"):
df.at[0, 3]
with pytest.raises(KeyError, match="^3$"):
df.loc[0, 3]
def test_iat_setter_incompatible_assignment(self):
# GH 23236
result = DataFrame({"a": [0, 1], "b": [4, 5]})
result.iat[0, 0] = None
expected = DataFrame({"a": [None, 1], "b": [4, 5]})
tm.assert_frame_equal(result, expected)
def test_iat_dont_wrap_object_datetimelike():
# GH#32809 .iat calls go through DataFrame._get_value, should not
# call maybe_box_datetimelike
dti = date_range("2016-01-01", periods=3)
tdi = dti - dti
ser = Series(dti.to_pydatetime(), dtype=object)
ser2 = Series(tdi.to_pytimedelta(), dtype=object)
df = DataFrame({"A": ser, "B": ser2})
assert (df.dtypes == object).all()
for result in [df.at[0, "A"], df.iat[0, 0], df.loc[0, "A"], df.iloc[0, 0]]:
assert result is ser[0]
assert isinstance(result, datetime)
assert not isinstance(result, Timestamp)
for result in [df.at[1, "B"], df.iat[1, 1], df.loc[1, "B"], df.iloc[1, 1]]:
assert result is ser2[1]
assert isinstance(result, timedelta)
assert not isinstance(result, Timedelta)
def test_at_with_tuple_index_get():
# GH 26989
# DataFrame.at getter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
assert df.at[(1, 2), "a"] == 1
# Series.at getter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
assert series.at[(1, 2)] == 1
def test_at_with_tuple_index_set():
# GH 26989
# DataFrame.at setter works with Index of tuples
df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)])
assert df.index.nlevels == 1
df.at[(1, 2), "a"] = 2
assert df.at[(1, 2), "a"] == 2
# Series.at setter works with Index of tuples
series = df["a"]
assert series.index.nlevels == 1
series.at[1, 2] = 3
assert series.at[1, 2] == 3
class TestMultiIndexScalar:
def test_multiindex_at_get(self):
# GH 26989
# DataFrame.at and DataFrame.loc getter works with MultiIndex
df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]])
assert df.index.nlevels == 2
assert df.at[(1, 3), "a"] == 1
assert df.loc[(1, 3), "a"] == 1
# Series.at and Series.loc getter works with MultiIndex
series = df["a"]
assert series.index.nlevels == 2
assert series.at[1, 3] == 1
assert series.loc[1, 3] == 1
def test_multiindex_at_set(self):
# GH 26989
# DataFrame.at and DataFrame.loc setter works with MultiIndex
df = | DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]]) | pandas.DataFrame |
from unittest import TestCase
import pandas as pd
from datamatch.filters import DissimilarFilter, NonOverlappingFilter
class DissimilarFilterTestCase(TestCase):
def test_valid(self):
f = DissimilarFilter('agency')
index = ['agency', 'uid']
self.assertFalse(f.valid(
pd.Series(['slidell pd', '123'], index=index),
pd.Series(['slidell pd', '456'], index=index)
))
self.assertTrue(f.valid(
pd.Series(['gretna pd', '123'], index=index),
pd.Series(['slidell pd', '456'], index=index)
))
def test_ignore_key_error(self):
index = ['agency', 'uid']
series_a = pd.Series(['slidell pd', '123'], index=index)
series_b = pd.Series(['slidell pd', '456'], index=index)
self.assertRaises(
KeyError,
lambda: DissimilarFilter('first').valid(series_a, series_b)
)
self.assertTrue(DissimilarFilter(
'first', ignore_key_error=True
).valid(series_a, series_b))
class NonOverlappingFilterTestCase(TestCase):
def test_valid(self):
f = NonOverlappingFilter('start', 'end')
index = ['uid', 'start', 'end']
self.assertFalse(f.valid(
pd.Series(['123', 0, 4], index=index),
pd.Series(['456', 3, 6], index=index)
))
self.assertFalse(f.valid(
pd.Series(['123', 10, 14], index=index),
pd.Series(['456', 3, 16], index=index)
))
self.assertFalse(f.valid(
pd.Series(['123', 0, 4], index=index),
pd.Series(['456', 3, 3], index=index)
))
self.assertFalse(f.valid(
pd.Series(['123', 10, 14], index=index),
pd.Series(['456', 3, 11], index=index)
))
self.assertTrue(f.valid(
pd.Series(['123', 10, 10], index=index),
pd.Series(['456', 3, 6], index=index)
))
self.assertTrue(f.valid(
| pd.Series(['123', 10, 12], index=index) | pandas.Series |
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import altair
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
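# Collect the index of every row that contains at least one NA (unique indices are reported below)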
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info ", value = False, key = session_state.id):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data) ', value = False, key = session_state.id):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_multi_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.beta_container()
with data_visualization_container:
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.beta_expander("Explore visualization types ", expanded = False)
with dev_expander_dv:
if df.shape[1] > 0 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = session_state.id)
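# For numeric variables, show a scatterplot with LOESS line, histogram, boxplot and QQ-plot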
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.beta_columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy = st.selectbox('Select variable for y-axis', yy_options, key = session_state.id)
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = session_state.id)
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.beta_columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred"),).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data = qqplot_data.sort_values(by = [var_sel])
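# Theoretical quantiles of a normal distribution (ordered sample vs. normal quantiles via stats.probplot)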
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# scatter matrix
# Check if variables are numeric
num_cols=[]
for column in df:
if df[column].dtypes in ('float', 'float32', 'float64', 'int', 'int32', 'int64'):
num_cols.append(column)
if len(num_cols)>1:
show_scatter_matrix = st.checkbox('Show scatter matrix', value = False, key = session_state.id)
if show_scatter_matrix == True:
multi_var_sel = st.multiselect('Select variables for scatter matrix', num_cols, num_cols, key = session_state.id)
if len(multi_var_sel)<2:
st.error("ERROR: Please choose at least two variables fro a scatterplot")
else:
#Plot scatter matrix:
scatter_matrix=alt.Chart(df[multi_var_sel]).mark_circle().encode(
x=alt.X(alt.repeat("column"), type='quantitative'),
y=alt.Y(alt.repeat("row"), type='quantitative')
).properties(
width=150,
height=150
).repeat(
row=multi_var_sel,
column=multi_var_sel
).interactive()
st.altair_chart(scatter_matrix, use_container_width=True)
#------------------------------------------------------------------------------------------
# REGRESSION
if analysis_type == "Regression":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container = st.beta_container()
with data_machinelearning_container:
st.header("**Multivariate data modelling**")
st.markdown("Go for creating predictive models of your data using classical and machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
LR_finalPara = None
LR_intercept = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please transform the binary response variable into a numeric binary categorization in data processing preferences!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric or binary response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Non-continuous variables are treated as continuous!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
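# Exclude the response variable from the explanatory variable options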
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms based on chosen response variable
# Binary (has to be integer or float)
if var_cat.loc[response_var] == "binary":
algorithms = ["Multiple Linear Regression", "Logistic Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "binary"
# Multi-class (has to be integer, currently treated as continuous response)
elif var_cat.loc[response_var] == "categorical":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
# Continuous
elif var_cat.loc[response_var] == "numeric":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
# MLR + binary info message
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression") and response_var_type == "binary":
st.warning("WARNING: For Multiple Linear Regression only the full model output will be determined.")
st.markdown("**Model-specific settings**")
# Multiple Linear Regression settings
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
MLR_intercept = "Yes"
MLR_cov_type = "non-robust"
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
if st.checkbox("Adjust settings for Multiple Linear Regression"):
col1, col2 = st.beta_columns(2)
with col1:
MLR_intercept = st.selectbox("Include intercept", ["Yes", "No"])
with col2:
MLR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0", "HC1", "HC2", "HC3"])
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
st.write("")
# Logistic Regression settings
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
LR_intercept = "Yes"
LR_cov_type = "non-robust"
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
if st.checkbox("Adjust settings for Logistic Regression"):
col1, col2 = st.beta_columns(2)
with col1:
LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
with col2:
LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
st.write("")
# Generalized Additive Models settings
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
gam_finalPara["intercept"] = "Yes"
gam_finalPara["number of splines"] = 20
gam_finalPara["spline order"] = 3
gam_finalPara["lambda"] = 0.6
gam_lam_search = "No"
if st.checkbox("Adjust settings for Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
col1, col2 = st.beta_columns(2)
with col1:
gam_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
gam_finalPara["intercept"] = gam_intercept
with col2:
gam_lam_search = st.selectbox("Search for lambda ", ["No", "Yes"])
if gam_lam_search == "Yes":
ls_col1, ls_col2, ls_col3 = st.beta_columns(3)
with ls_col1:
ls_min = st.number_input("Minimum lambda value", value=0.001, step=1e-3, min_value=0.001, format="%.3f")
with ls_col2:
ls_max = st.number_input("Maximum lambda value", value=100.000, step=1e-3, min_value=0.002, format="%.3f")
with ls_col3:
ls_number = st.number_input("Lambda values per variable", value=50, min_value=2)
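# The lambda grid has ls_number ** len(expl_var) combinations (one lambda range per explanatory variable)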
if ls_number**len(expl_var) > 10000:
st.warning("WARNING: Your grid has " + str(ls_number**len(expl_var)) + " combinations. Please note that searching for lambda will take a lot of time!")
else:
st.info("Your grid has " + str(ls_number**len(expl_var)) + " combinations.")
if gam_lam_search == "No":
gam_col1, gam_col2, gam_col3 = st.beta_columns(3)
if gam_lam_search == "Yes":
gam_col1, gam_col2= st.beta_columns(2)
gam_nos_values = []
gam_so_values = []
gam_lam_values = []
for gset in range(0,len(expl_var)):
var_name = expl_var[gset]
with gam_col1:
nos = st.number_input("Number of splines (" + var_name + ")", value = 20, min_value=1)
gam_nos_values.append(nos)
with gam_col2:
so = st.number_input("Spline order (" + var_name + ")", value = 3, min_value=3)
gam_so_values.append(so)
if gam_lam_search == "No":
with gam_col3:
lam = st.number_input("Lambda (" + var_name + ")", value = 0.6, min_value=0.001, step=1e-3, format="%.3f")
gam_lam_values.append(lam)
if nos <= so:
st.error("ERROR: Please make sure that the number of splines is greater than the spline order for "+ str(expl_var[gset]) + "!")
return
if gam_lam_search == "Yes":
lam = np.round(np.linspace(ls_min, ls_max, ls_number),3)
if len(expl_var) == 1:
gam_lam_values = lam
else:
gam_lam_values = [lam] * len(expl_var)
gam_finalPara.at["value", "number of splines"] = gam_nos_values
gam_finalPara.at["value","spline order"] = gam_so_values
gam_finalPara.at["value","lambda"] = gam_lam_values
st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [100]
brt_finalPara["learning rate"] = [0.1]
brt_finalPara["maximum tree depth"] = [3]
brt_finalPara["sample rate"] = [1]
final_hyPara_values["brt"] = brt_finalPara
if st.checkbox("Adjust settings for Boosted Regression Trees "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_finalPara["number of trees"] = st.number_input("Number of trees ", value=100, step=1, min_value=1)
with col2:
brt_finalPara["learning rate"] = st.slider("Learning rate ", value=0.1, min_value=0.001, max_value=0.1 , step=1e-3, format="%.3f")
with col3:
brt_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=3, step=1, min_value=1, max_value=30)
with col4:
brt_finalPara["sample rate"] = st.slider("Sample rate ", value=1.0, step=0.01, min_value=0.5, max_value=1.0)
final_hyPara_values["brt"] = brt_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
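# Hidden layer sizes are stored as a tuple with one node count per hidden layer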
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search", "Bayes optimization", "sequential model-based optimization"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_tunePara["number of trees"] = [50, 500]
brt_tunePara["learning rate"] = [0.001, 0.010]
brt_tunePara["maximum tree depth"] = [2, 10]
brt_tunePara["sample rate"] = [0.8, 1.0]
hyPara_values["brt"] = brt_tunePara
if st.checkbox("Adjust tuning settings for Boosted Regression Trees"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_tunePara["number of trees"] = st.slider("Range for number of trees", 50, 1000, [50, 500])
with col2:
brt_tunePara["learning rate"] = st.slider("Range for learning rate", 0.001, 0.1, [0.001, 0.02], step=1e-3, format="%.3f")
with col3:
brt_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth", 1, 30, [2, 10])
with col4:
brt_tunePara["sample rate"] = st.slider("Range for sample rate", 0.5, 1.0, [0.8, 1.0])
hyPara_values["brt"] = brt_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
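# Tuning ranges are stored as "min"/"max" rows; list-valued settings keep their options in "min" and "NA" in "max"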
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
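# Fall back to auto-detecting the column separator (';', ',' or tab) via a regex separator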
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
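# For each new data row, look up the category that was assigned to the same raw value in the original data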
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
# Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
else:
st.info("All variables are available for predictions!")
# Check if NAs are present and delete them automatically
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show machine learning summary
if st.checkbox('Show a summary of machine learning settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- Models:", ', '.join(sb_ML_alg))
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
# st.write("- Multiple Linear Regression model: ", MLR_model)
st.write("- Multiple Linear Regression including intercept: ", MLR_intercept)
st.write("- Multiple Linear Regression covariance type: ", MLR_cov_type)
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.write("- Logistic Regression including intercept: ", LR_intercept)
st.write("- Logistic Regression covariance type: ", LR_cov_type)
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.write("- Generalized Additive Models parameters: ")
st.write(gam_finalPara)
if any(a for a in sb_ML_alg if a == "Random Forest") and do_hypTune == "No":
st.write("- Random Forest parameters: ")
st.write(rf_finalPara)
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees") and do_hypTune == "No":
st.write("- Boosted Regression Trees parameters: ")
st.write(brt_finalPara)
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks") and do_hypTune == "No":
st.write("- Artificial Neural Networks parameters: ")
st.write(ann_finalPara)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# Hyperparameter settings summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks" or a == "Boosted Regression Trees" or a == "Random Forest"):
st.write("Hyperparameter-tuning settings summary:")
if do_hypTune == "No":
st.write("- ", do_hypTune_no)
st.write("")
if do_hypTune == "Yes":
st.write("- Search method:", hypTune_method)
st.write("- ", hypTune_nCV, "-fold cross-validation")
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
st.write("- ", hypTune_iter, "iterations in search")
st.write("")
# Random Forest summary
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.write("Random Forest tuning settings summary:")
st.write(rf_tunePara)
# Boosted Regression Trees summary
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.write("Boosted Regression Trees tuning settings summary:")
st.write(brt_tunePara)
# Artificial Neural Networks summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.write("Artificial Neural Networks tuning settings summary:")
st.write(ann_tunePara.style.format({"L² regularization": "{:.5}"}))
#st.caption("** Learning rate is only used in adam")
st.write("")
# General settings summary
st.write("General settings summary:")
st.write("- Response variable type: ", response_var_type)
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run models")
st.write("")
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
# Hyperparameter tuning
if do_hypTune == "Yes":
# Tuning
model_tuning_results = ml.model_tuning(df, sb_ML_alg, hypTune_method, hypTune_iter, hypTune_nCV, hyPara_values, response_var_type, response_var, expl_var)
# Save final hyperparameters
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tuning_results = model_tuning_results["rf tuning"]
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [rf_tuning_results.loc["value"]["number of trees"]]
if rf_tuning_results.loc["value"]["maximum tree depth"] == "None":
rf_finalPara["maximum tree depth"] = None
else:
rf_finalPara["maximum tree depth"] = [rf_tuning_results.loc["value"]["maximum tree depth"]]
rf_finalPara["maximum number of features"] = [rf_tuning_results.loc["value"]["maximum number of features"]]
rf_finalPara["sample rate"] = [rf_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["rf"] = rf_finalPara
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tuning_results = model_tuning_results["brt tuning"]
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [brt_tuning_results.loc["value"]["number of trees"]]
brt_finalPara["learning rate"] = [brt_tuning_results.loc["value"]["learning rate"]]
brt_finalPara["maximum tree depth"] = [brt_tuning_results.loc["value"]["maximum tree depth"]]
brt_finalPara["sample rate"] = [brt_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["brt"] = brt_finalPara
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tuning_results = model_tuning_results["ann tuning"]
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"]) #"learning rate schedule", "momentum", "epsilon"])
ann_finalPara["weight optimization solver"] = [ann_tuning_results.loc["value"]["weight optimization solver"]]
ann_finalPara["maximum number of iterations"] = [ann_tuning_results.loc["value"]["maximum number of iterations"]]
ann_finalPara["activation function"] = [ann_tuning_results.loc["value"]["activation function"]]
ann_finalPara["hidden layer sizes"] = [ann_tuning_results.loc["value"]["hidden layer sizes"]]
ann_finalPara["learning rate"] = [ann_tuning_results.loc["value"]["learning rate"]]
#ann_finalPara["learning rate schedule"] = [ann_tuning_results.loc["value"]["learning rate schedule"]]
#ann_finalPara["momentum"] = [ann_tuning_results.loc["value"]["momentum"]]
ann_finalPara["L² regularization"] = [ann_tuning_results.loc["value"]["L² regularization"]]
#ann_finalPara["epsilon"] = [ann_tuning_results.loc["value"]["epsilon"]]
final_hyPara_values["ann"] = ann_finalPara
# Lambda search for GAM
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
if gam_lam_search == "Yes":
st.info("Lambda search")
my_bar = st.progress(0.0)
progress = 0
Y_data_gam = df[response_var]
X_data_gam = df[expl_var]
nos = gam_finalPara["number of splines"][0]
so = gam_finalPara["spline order"][0]
lams = gam_lam_values
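# Grid search over the lambda grid (LinearGAM for continuous, LogisticGAM for binary responses)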
if response_var_type == "continuous":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if response_var_type == "binary":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
progress += 1
my_bar.progress(progress/1)
# Model validation
if do_modval == "Yes":
model_val_results = ml.model_val(df, sb_ML_alg, MLR_model, train_frac, val_runs, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara, MLR_finalPara, LR_finalPara)
# Full model (depending on prediction for new data)
if do_modprednew == "Yes":
if new_data_pred is not None:
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
if do_modprednew == "No":
df_new = pd.DataFrame()
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
# Success message
st.success('Models run successfully!')
else: st.error("ERROR: No data available for Modelling!")
#++++++++++++++++++++++
# ML OUTPUT
# Show only if models were run (do not add further widgets after "Run models", otherwise the full page reloads)
if run_models == True:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.beta_expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
if response_var_type == "continuous":
st.altair_chart(correlation_plot, use_container_width = True)
if response_var_type == "binary":
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
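# scipy's probplot returns the ordered theoretical normal quantiles; pairing them with the sorted standardized residuals gives the QQ-plot coordinates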
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
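# 45-degree reference line spanning the range of the theoretical quantiles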
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
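# Each result table is written to its own sheet of an in-memory workbook, which is then embedded as a base64 data-URI download link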
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
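# tick marks along the x-axis (rug) show the distribution of the observed predictor values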
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
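# alternate the partial dependence plots between the two layout columns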
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_partDep")))
st.write("")
# Further graphical output
fm_gam_figs4_col1, fm_gam_figs4_col2 = st.beta_columns(2)
with fm_gam_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["GAM fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_gam_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Generalized Additive Models"]
residuals_fitted_data["Fitted"] = model_full_results["GAM fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_obsResVsFit")))
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
rf_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Random Forest"]
rf_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Random Forest"]
rf_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Random Forest"]
rf_error_est.loc["Residual SE"] = model_full_results["RF Residual SE"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat")))
st.write("")
# Variable importance (via permutation)
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
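# the stored partial dependence results are indexed with [1][0] for the grid of feature values and [0][0] for the averaged predictions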
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep")))
st.write("")
# Further graphical output
fm_rf_figs4_col1, fm_rf_figs4_col2 = st.beta_columns(2)
with fm_rf_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["RF fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_rf_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Random Forest"]
residuals_fitted_data["Fitted"] = model_full_results["RF fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_obsResVsFit")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
brt_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Boosted Regression Trees"]
brt_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Boosted Regression Trees"]
brt_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Boosted Regression Trees"]
brt_error_est.loc["Residual SE"] = model_full_results["BRT Residual SE"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat")))
st.write("")
# Training score (MSE vs. number of trees)
st.write("Training score:")
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training MSE"])
train_score["Training MSE"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training MSE", title = "training MSE", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training MSE", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
# Variable importance (via permutation)
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep")))
st.write("")
# Further graphical output
fm_brt_figs4_col1, fm_brt_figs4_col2 = st.beta_columns(2)
with fm_brt_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["BRT fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_brt_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Boosted Regression Trees"]
residuals_fitted_data["Fitted"] = model_full_results["BRT fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_obsResVsFit")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE", "Best loss"], columns = ["Value"])
ann_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Artificial Neural Networks"]
ann_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Artificial Neural Networks"]
ann_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Artificial Neural Networks"]
ann_error_est.loc["Residual SE"] = model_full_results["ANN Residual SE"]
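# the best-loss value is only exposed by the iterative solvers (sgd, adam), not by lbfgs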
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
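# (a per-iteration loss curve is recorded only for sgd/adam, so the plot is skipped for lbfgs)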
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
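# the grid values are transformed back to the original scale (x * std + mean), as the ANN inputs were presumably standardized for fitting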
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep")))
st.write("")
# Further graphical output
fm_ann_figs3_col1, fm_ann_figs3_col2 = st.beta_columns(2)
with fm_ann_figs3_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["ANN fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_ann_figs3_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Artificial Neural Networks"]
residuals_fitted_data["Fitted"] = model_full_results["ANN fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_obsResVsFit")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
st.markdown("**Model comparison**")
st.write("Performance metrics:")
model_comp_sort_enable = (model_full_results["model comparison"]).transpose()
st.write(model_comp_sort_enable.style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompPerf")))
st.write("")
model_full_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_full_res.loc["min"][m] = model_full_results["residuals"][m].min()
model_full_res.loc["25%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.25)
model_full_res.loc["median"][m] = model_full_results["residuals"][m].quantile(q = 0.5)
model_full_res.loc["75%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.75)
model_full_res.loc["max"][m] = model_full_results["residuals"][m].max()
st.write("Residuals distribution:")
st.write((model_full_res).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompRes")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_comp_sort_enable.to_excel(excel_file, sheet_name="performance_metrics")
model_full_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
#-------------------------------------------------------------
# Binary response variable
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# LR specific output
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.markdown("**Logistic Regression**")
# Regression information
fm_lr_reg_col1, fm_lr_reg_col2 = st.beta_columns(2)
with fm_lr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["LR information"].style.set_precision(user_precision))
# Regression statistics
with fm_lr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["LR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["LR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_coef")))
st.write("")
# Variable importance (via permutation)
fm_lr_fig1_col1, fm_lr_fig1_col2 = st.beta_columns(2)
with fm_lr_fig1_col1:
st.write("Variable importance (via permutation):")
lr_varImp_table = model_full_results["LR variable importance"]
st.table(lr_varImp_table.style.set_precision(user_precision))
with fm_lr_fig1_col2:
st.write("")
st.write("")
st.write("")
lr_varImp_plot_data = model_full_results["LR variable importance"]
lr_varImp_plot_data["Variable"] = lr_varImp_plot_data.index
lr_varImp = alt.Chart(lr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(lr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_varImp")))
st.write("")
fm_lr_fig_col1, fm_lr_fig_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_lr_fig_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["LR fitted"])
prob_data["Observed"] = df[response_var]
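# column 1 of the fitted values holds the probability of the positive class (presumably predict_proba-style output)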
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 2, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_lr_fig_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["LR ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["LR ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Logistic Regression"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
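# diagonal reference line corresponding to a random classifier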
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_thresAUC")))
st.write("")
# Partial probabilities
st.write("Partial probability plots:")
fm_lr_figs2_col1, fm_lr_figs2_col2 = st.beta_columns(2)
for pp_var in expl_var:
pp_data = pd.DataFrame(columns = [pp_var])
pp_data[pp_var] = model_full_results["LR partial probabilities"][pp_var][pp_var]
pp_data["ProbabilityOfOccurrence"] = model_full_results["LR partial probabilities"][pp_var]["prediction"]
pp_data["Observed"] = df[response_var]
pp_chart = alt.Chart(pp_data, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("ProbabilityOfOccurrence", title = "probability of occurrence", scale = alt.Scale(domain = [0, 1]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["ProbabilityOfOccurrence"] + [pp_var]
)
obs_data_plot = alt.Chart(pp_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence"] + [pp_var]
)
if expl_var.index(pp_var)%2 == 0:
with fm_lr_figs2_col1:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if expl_var.index(pp_var)%2 == 1:
with fm_lr_figs2_col2:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_partProb")))
# Download link for LR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["LR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["LR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["LR coefficients"].to_excel(excel_file, sheet_name="coefficients")
lr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "LR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Logistic Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat_bin")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig_bin")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp_bin")))
st.write("")
# Observed vs. Probability of Occurrence
fm_gam_figs5_col1, fm_gam_figs5_col2 = st.beta_columns(2)
with fm_gam_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["GAM fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[0]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Generalized Additive Models"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_gam_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["GAM ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["GAM ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Generalized Additive Models"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Generalized Additive Models"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_thresAUC")))
st.write("")
# Partial dependence plots
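# Each partial dependence curve is drawn with dashed lines for the lower/upper 95% bounds
# and tick marks of the observed predictor values; the y-axis is fixed to the global
# min/max stored in "GAM partial dependence min/max" so all panels share the same scale.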
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_partDep_bin")))
st.write("")
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS"], columns = ["Value"])
rf_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Random Forest"]
rf_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Random Forest"]
rf_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Random Forest"]
rf_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Random Forest"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat_bin")))
st.write("")
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp_bin")))
st.write("")
fm_rf_figs5_col1, fm_rf_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_rf_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["RF fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Random Forest"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_rf_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["RF ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["RF ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Random Forest"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Random Forest"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep_bin")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS"], columns = ["Value"])
brt_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Boosted Regression Trees"]
brt_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Boosted Regression Trees"]
brt_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Boosted Regression Trees"]
brt_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Boosted Regression Trees"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat_bin")))
st.write("")
# Training score (deviance vs. number of trees)
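# Training deviance per boosting iteration, plotted against the number of trees (index + 1).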
st.write("Training score:")
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training deviance"])
train_score["Training deviance"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training deviance", title = "training deviance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training deviance", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp_bin")))
st.write("")
fm_brt_figs5_col1, fm_brt_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_brt_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["BRT fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Boosted Regression Trees"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_brt_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["BRT ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["BRT ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Boosted Regression Trees"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Boosted Regression Trees"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep_bin")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS", "Best loss"], columns = ["Value"])
ann_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Artificial Neural Networks"]
ann_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Artificial Neural Networks"]
ann_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Artificial Neural Networks"]
ann_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Artificial Neural Networks"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat_bin")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
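# The loss curve is only available for iterative solvers, so it is skipped when the
# weight optimization solver is lbfgs.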
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp_bin")))
st.write("")
fm_ann_figs5_col1, fm_ann_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_ann_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["ANN fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Artificial Neural Networks"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_ann_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["ANN ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["ANN ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Artificial Neural Networks"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Artificial Neural Networks"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_thresAUC")))
st.write("")
# Partial dependence plots
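# The partial dependence x-values are back-transformed from the standardized scale
# (value * std + mean of the original variable) before plotting.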
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep_bin")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
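# Model comparison across all selected algorithms: threshold-independent metrics,
# the per-model thresholds, and threshold-dependent metrics (each transposed so that
# models appear as rows), followed by a combined Excel download.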
if any(a for a in sb_ML_alg if a == "Logistic Regression" or a == "Random Forest" or a == "Generalized Additive Models" or a == "Boosted Regression Trees" or a == "Artificial Neural Networks"):
st.markdown("**Model comparison**")
st.write("Threshold-independent metrics:")
st.write((model_full_results["model comparison thresInd"]).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompThresInd")))
st.write("")
st.write("Thresholds:")
st.table(model_full_results["model comparison thres"].transpose().style.set_precision(user_precision))
st.write("")
st.write("Threshold-dependent metrics:")
st.write((model_full_results["model comparison thresDep"]).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompThresDep")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["model comparison thresInd"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics")
model_full_results["model comparison thres"].to_excel(excel_file, sheet_name="thresholds")
model_full_results["model comparison thresDep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# FULL MODEL PREDICTIONS
prediction_output = st.beta_expander("Full model predictions", expanded = False)
with prediction_output:
if model_full_results is not None:
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
MLR_pred_orig = pd.DataFrame(columns = [response_var])
MLR_pred_orig[response_var] = model_full_results["MLR fitted"]
st.write(MLR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
MLR_pred_new = pd.DataFrame(columns = [response_var])
MLR_pred_new[response_var] = model_full_results["MLR prediction"]
st.write(MLR_pred_new.style.set_precision(user_precision))
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
GAM_pred_orig = pd.DataFrame(columns = [response_var])
GAM_pred_orig[response_var] = model_full_results["GAM fitted"]
st.write(GAM_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
GAM_pred_new = pd.DataFrame(columns = [response_var])
GAM_pred_new[response_var] = model_full_results["GAM prediction"]
st.write(GAM_pred_new.style.set_precision(user_precision))
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
RF_pred_orig = pd.DataFrame(columns = [response_var])
RF_pred_orig[response_var] = model_full_results["RF fitted"]
st.write(RF_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
RF_pred_new = pd.DataFrame(columns = [response_var])
RF_pred_new[response_var] = model_full_results["RF prediction"]
st.write(RF_pred_new.style.set_precision(user_precision))
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
BRT_pred_orig = pd.DataFrame(columns = [response_var])
BRT_pred_orig[response_var] = model_full_results["BRT fitted"]
st.write(BRT_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
BRT_pred_new = pd.DataFrame(columns = [response_var])
BRT_pred_new[response_var] = model_full_results["BRT prediction"]
st.write(BRT_pred_new.style.set_precision(user_precision))
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
ANN_pred_orig = pd.DataFrame(columns = [response_var])
ANN_pred_orig[response_var] = model_full_results["ANN fitted"]
st.write(ANN_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
ANN_pred_new = pd.DataFrame(columns = [response_var])
ANN_pred_new[response_var] = model_full_results["ANN prediction"]
st.write(ANN_pred_new.style.set_precision(user_precision))
#-------------------------------------------------------------
# Binary response variable
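# For binary responses the reported predictions are probabilities of occurrence: LR, RF,
# BRT and ANN take the second column of the probability output ([:, 1]), while MLR and GAM
# fitted/predicted values are used directly; all except MLR also store a thresholded "_binary" column.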
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
MLR_pred_orig = pd.DataFrame(columns = [response_var])
MLR_pred_orig[response_var] = model_full_results["MLR fitted"]
st.write(MLR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
MLR_pred_new = pd.DataFrame(columns = [response_var])
MLR_pred_new[response_var] = model_full_results["MLR prediction"]
st.write(MLR_pred_new.style.set_precision(user_precision))
st.write("")
# LR specific output
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.markdown("**Logistic Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
LR_pred_orig = pd.DataFrame(columns = [response_var])
LR_pred_orig[response_var] = model_full_results["LR fitted"][:, 1]
LR_pred_orig[response_var + "_binary"] = model_full_results["LR fitted binary"]
st.write(LR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
LR_pred_new = pd.DataFrame(columns = [response_var])
LR_pred_new[response_var] = model_full_results["LR prediction"][:, 1]
LR_pred_new[response_var + "_binary"] = model_full_results["LR prediction binary"]
st.write(LR_pred_new.style.set_precision(user_precision))
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
GAM_pred_orig = pd.DataFrame(columns = [response_var])
GAM_pred_orig[response_var] = model_full_results["GAM fitted"]
GAM_pred_orig[response_var + "_binary"] = model_full_results["GAM fitted binary"]
st.write(GAM_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
GAM_pred_new = pd.DataFrame(columns = [response_var])
GAM_pred_new[response_var] = model_full_results["GAM prediction"]
GAM_pred_new[response_var + "_binary"] = model_full_results["GAM prediction binary"]
st.write(GAM_pred_new.style.set_precision(user_precision))
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
RF_pred_orig = pd.DataFrame(columns = [response_var])
RF_pred_orig[response_var] = model_full_results["RF fitted"][:, 1]
RF_pred_orig[response_var + "_binary"] = model_full_results["RF fitted binary"]
st.write(RF_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
RF_pred_new = pd.DataFrame(columns = [response_var])
RF_pred_new[response_var] = model_full_results["RF prediction"][:, 1]
RF_pred_new[response_var + "_binary"] = model_full_results["RF prediction binary"]
st.write(RF_pred_new.style.set_precision(user_precision))
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
BRT_pred_orig = pd.DataFrame(columns = [response_var])
BRT_pred_orig[response_var] = model_full_results["BRT fitted"][:, 1]
BRT_pred_orig[response_var + "_binary"] = model_full_results["BRT fitted binary"]
st.write(BRT_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
BRT_pred_new = pd.DataFrame(columns = [response_var])
BRT_pred_new[response_var] = model_full_results["BRT prediction"][:, 1]
BRT_pred_new[response_var + "_binary"] = model_full_results["BRT prediction binary"]
st.write(BRT_pred_new.style.set_precision(user_precision))
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
ANN_pred_orig = pd.DataFrame(columns = [response_var])
ANN_pred_orig[response_var] = model_full_results["ANN fitted"][:, 1]
ANN_pred_orig[response_var + "_binary"] = model_full_results["ANN fitted binary"]
st.write(ANN_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
ANN_pred_new = pd.DataFrame(columns = [response_var])
ANN_pred_new[response_var] = model_full_results["ANN prediction"][:, 1]
ANN_pred_new[response_var + "_binary"] = model_full_results["ANN prediction binary"]
st.write(ANN_pred_new.style.set_precision(user_precision))
st.write("")
#-------------------------------------------------------------
st.write("")
# Download links for prediction data
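# One Excel sheet per selected algorithm with the fitted values for the original data and,
# if predictions for new data were requested, an additional sheet with those predictions.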
output = BytesIO()
predictions_excel = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_pred_orig.to_excel(predictions_excel, sheet_name="MLR_pred_orig")
if do_modprednew == "Yes":
MLR_pred_new.to_excel(predictions_excel, sheet_name="MLR_pred_new")
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_pred_orig.to_excel(predictions_excel, sheet_name="LR_pred_orig")
if do_modprednew == "Yes":
LR_pred_new.to_excel(predictions_excel, sheet_name="LR_pred_new")
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
GAM_pred_orig.to_excel(predictions_excel, sheet_name="GAM_pred_orig")
if do_modprednew == "Yes":
GAM_pred_new.to_excel(predictions_excel, sheet_name="GAM_pred_new")
if any(a for a in sb_ML_alg if a == "Random Forest"):
RF_pred_orig.to_excel(predictions_excel, sheet_name="RF_pred_orig")
if do_modprednew == "Yes":
RF_pred_new.to_excel(predictions_excel, sheet_name="RF_pred_new")
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
BRT_pred_orig.to_excel(predictions_excel, sheet_name="BRT_pred_orig")
if do_modprednew == "Yes":
BRT_pred_new.to_excel(predictions_excel, sheet_name="BRT_pred_new")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ANN_pred_orig.to_excel(predictions_excel, sheet_name="ANN_pred_orig")
if do_modprednew == "Yes":
ANN_pred_new.to_excel(predictions_excel, sheet_name="ANN_pred_new")
predictions_excel.save()
predictions_excel = output.getvalue()
b64 = base64.b64encode(predictions_excel)
dl_file_name = "Full model predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/predictions_excel;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download full model predictions</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# VALIDATION OUTPUT
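# Shown only when validation was requested: means/SDs of metrics across validation runs,
# boxplots (residuals and % VE for continuous responses, AUC ROC and TSS for binary ones),
# variable importances and, for continuous responses, the residual distribution.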
if do_modval == "Yes":
val_output = st.beta_expander("Validation output", expanded = False)
with val_output:
if model_val_results is not None:
#------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# Metrics means
st.write("Means of metrics across validation runs:")
st.write(model_val_results["mean"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means")))
# Metrics sd
st.write("SDs of metrics across validation runs:")
st.write(model_val_results["sd"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds")))
st.write("")
st.write("")
val_col1, val_col2 = st.beta_columns(2)
with val_col1:
# Residuals boxplot
if model_val_results["residuals"] is not None:
st.write("Boxplot of residuals across validation runs:")
residual_results = model_val_results["residuals"]
residuals_bplot = pd.melt(residual_results, ignore_index = False, var_name = "Algorithm", value_name = "Residuals")
residuals_boxchart = alt.Chart(residuals_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Residuals", title = "residuals", scale = alt.Scale(zero = False)),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
residuals_plot = residuals_boxchart #+ residuals_scatter
st.altair_chart(residuals_plot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_resBoxplot")))
with val_col2:
# Variance explained boxplot
if model_val_results["variance explained"] is not None:
st.write("Boxplot of % VE across validation runs:")
ve_results = model_val_results["variance explained"]
ve_bplot = pd.melt(ve_results, ignore_index = False, var_name = "Algorithm", value_name = "% VE")
ve_boxchart = alt.Chart(ve_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("% VE", scale = alt.Scale(domain = [min(ve_bplot["% VE"]), max(ve_bplot["% VE"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(ve_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_VEBoxplot")))
st.write("")
# Variable importance (via permutation)
st.write("Means of variable importances:")
varImp_table_mean = model_val_results["variable importance mean"]
st.write(varImp_table_mean.transpose().style.set_precision(user_precision))
st.write("SDs of variable importances:")
varImp_table_sd = model_val_results["variable importance sd"]
st.write(varImp_table_sd.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_varImp")))
st.write("")
st.write("")
# Residuals
if model_val_results["residuals"] is not None:
model_val_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_val_res.loc["min"][m] = model_val_results["residuals"][m].min()
model_val_res.loc["25%-Q"][m] = model_val_results["residuals"][m].quantile(q = 0.25)
model_val_res.loc["median"][m] = model_val_results["residuals"][m].quantile(q = 0.5)
model_val_res.loc["75%-Q"][m] = model_val_results["residuals"][m].quantile(q = 0.75)
model_val_res.loc["max"][m] = model_val_results["residuals"][m].max()
st.write("Residuals distribution across all validation runs:")
st.write(model_val_res.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_res")))
st.write("")
# Download link for validation output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_val_results["mean"].transpose().to_excel(excel_file, sheet_name="performance_metrics_mean")
model_val_results["sd"].transpose().to_excel(excel_file, sheet_name="performance_metrics_sd")
varImp_table_mean.to_excel(excel_file, sheet_name="variable_importance_mean")
varImp_table_sd.to_excel(excel_file, sheet_name="variable_importance_sd")
model_val_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Validation output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download validation output</a>
""",
unsafe_allow_html=True)
st.write("")
#------------------------------------
# Binary response variable
if response_var_type == "binary":
if model_val_results["mean_ind"].empty:
st.warning("Please select an additional algorithm besides Multiple Linear Regression!")
# Metrics (independent)
if model_val_results["mean_ind"].empty:
st.write("")
else:
st.write("Means of threshold-independent metrics across validation runs:")
st.write(model_val_results["mean_ind"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means_thresInd")))
# Metrics (independent)
if model_val_results["sd_ind"].empty:
st.write("")
else:
st.write("SDs of threshold-independent metrics across validation runs:")
st.write(model_val_results["sd_ind"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds_thresInd")))
st.write("")
st.write("")
val_col1, val_col2 = st.beta_columns(2)
with val_col1:
# AUC ROC boxplot
if model_val_results["AUC ROC"].empty:
st.write("")
else:
st.write("Boxplot of AUC ROC across validation runs:")
auc_results = model_val_results["AUC ROC"]
auc_bplot = pd.melt(auc_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
auc_boxchart = alt.Chart(auc_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "AUC ROC", scale = alt.Scale(domain = [min(auc_bplot["Value"]), max(auc_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(auc_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_AUCBoxplot")))
with val_col2:
# TSS boxplot
if model_val_results["TSS"].empty:
st.write("")
else:
st.write("Boxplot of TSS across validation runs:")
tss_results = model_val_results["TSS"]
tss_bplot = pd.melt(tss_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
tss_boxchart = alt.Chart(tss_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "TSS", scale = alt.Scale(domain = [min(tss_bplot["Value"]), max(tss_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(tss_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_TSSBoxplot")))
st.write("")
# Variable importance
st.write("Means of variable importances:")
varImp_table_mean = model_val_results["variable importance mean"]
st.write(varImp_table_mean.style.set_precision(user_precision))
st.write("SDs of variable importances:")
varImp_table_sd = model_val_results["variable importance sd"]
st.write(varImp_table_sd.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_varImp_bin")))
st.write("")
st.write("")
# Metrics (dependent)
if model_val_results["mean_dep"].empty:
st.write("")
else:
st.write("Means of threshold-dependent metrics across validation runs:")
st.write(model_val_results["mean_dep"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means_thresDep")))
# Metrics (dependent)
if model_val_results["sd_dep"].empty:
st.write("")
else:
st.write("SDs of threshold-dependent metrics across validation runs:")
st.write(model_val_results["sd_dep"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds_thresDep")))
st.write("")
# Download link for validation output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_val_results["mean_ind"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics_mean")
model_val_results["sd_ind"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics_sd")
varImp_table_mean.to_excel(excel_file, sheet_name="variable_importance_mean")
varImp_table_sd.to_excel(excel_file, sheet_name="variable_importance_sd")
model_val_results["mean_dep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics_mean")
model_val_results["sd_dep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics_sd")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Validation output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download validation output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER-TUNING OUTPUT
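# Shown for RF, BRT and ANN: final hyperparameters per algorithm and, if tuning was
# performed, the tuning details (scoring metric, number of models, mean/SD of the
# cross-validation score and the test data score).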
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
if do_hypTune == "Yes":
hype_title = "Hyperparameter-tuning output"
if do_hypTune != "Yes":
hype_title = "Hyperparameter output"
hype_output = st.beta_expander(hype_title, expanded = False)
with hype_output:
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
# Final hyperparameters
if rf_finalPara is not None:
st.write("Final hyperparameters:")
st.table(rf_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if rf_tuning_results is not None and rf_finalPara is not None:
st.write("Tuning details:")
rf_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
rf_finalTuneMetrics["scoring metric"] = [rf_tuning_results.loc["value"]["scoring"]]
rf_finalTuneMetrics["number of models"] = [rf_tuning_results.loc["value"]["number of models"]]
rf_finalTuneMetrics["mean cv score"] = [rf_tuning_results.loc["value"]["mean score"]]
rf_finalTuneMetrics["standard deviation cv score"] = [rf_tuning_results.loc["value"]["std score"]]
rf_finalTuneMetrics["test data score"] = [rf_tuning_results.loc["value"]["test score"]]
st.table(rf_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_details")))
st.write("")
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
# Final hyperparameters
if brt_finalPara is not None:
st.write("Final hyperparameters:")
st.table(brt_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if brt_tuning_results is not None and brt_finalPara is not None:
st.write("Tuning details:")
brt_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
brt_finalTuneMetrics["scoring metric"] = [brt_tuning_results.loc["value"]["scoring"]]
brt_finalTuneMetrics["number of models"] = [brt_tuning_results.loc["value"]["number of models"]]
brt_finalTuneMetrics["mean cv score"] = [brt_tuning_results.loc["value"]["mean score"]]
brt_finalTuneMetrics["standard deviation cv score"] = [brt_tuning_results.loc["value"]["std score"]]
brt_finalTuneMetrics["test data score"] = [brt_tuning_results.loc["value"]["test score"]]
st.table(brt_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_details")))
st.write("")
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
# Final hyperparameters
if ann_finalPara is not None:
st.write("Final hyperparameters:")
st.table(ann_finalPara.transpose().style.format({"L² regularization": "{:.5}"}))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if ann_tuning_results is not None and ann_finalPara is not None:
st.write("Tuning details:")
ann_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
ann_finalTuneMetrics["scoring metric"] = [ann_tuning_results.loc["value"]["scoring"]]
ann_finalTuneMetrics["number of models"] = [ann_tuning_results.loc["value"]["number of models"]]
ann_finalTuneMetrics["mean cv score"] = [ann_tuning_results.loc["value"]["mean score"]]
ann_finalTuneMetrics["standard deviation cv score"] = [ann_tuning_results.loc["value"]["std score"]]
ann_finalTuneMetrics["test data score"] = [ann_tuning_results.loc["value"]["test score"]]
st.table(ann_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_details")))
st.write("")
# Download link for hyperparameter output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara.to_excel(excel_file, sheet_name="RF_final_hyperparameters")
if do_hypTune == "Yes":
rf_finalTuneMetrics.to_excel(excel_file, sheet_name="RF_tuning_details")
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara.to_excel(excel_file, sheet_name="BRT_final_hyperparameters")
if do_hypTune == "Yes":
brt_finalTuneMetrics.to_excel(excel_file, sheet_name="BRT_tuning_details")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara.to_excel(excel_file, sheet_name="ANN_final_hyperparameters")
if do_hypTune == "Yes":
ann_finalTuneMetrics.to_excel(excel_file, sheet_name="ANN_tuning_details")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
if do_hypTune == "Yes":
dl_file_name = "Hyperparameter-tuning output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter-tuning output</a>
""",
unsafe_allow_html=True)
if do_hypTune != "Yes":
dl_file_name = "Hyperparameter output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter output</a>
""",
unsafe_allow_html=True)
st.write("")
#------------------------------------------------------------------------------------------
# MULTI-CLASS CLASSIFICATION
if analysis_type == "Multi-class classification":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container2 = st.beta_container()
with data_machinelearning_container2:
st.header("**Multi-class classification**")
st.markdown("Go for creating predictive models of your data using machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models (same as for regression, bc same functions are used)
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
                LR_finalPara = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
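                    # var_cat maps each variable to its detected type (e.g. "numeric", "binary", "string/categorical") and drives the checks below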
# Response variable
response_var_type = "multi-class"
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check how many classes the response variable has (max: 10 classes)
if len(pd.unique(df[response_var])) > 10:
st.error("ERROR: Your response variable has more than 10 classes. Please select a variable with less classes!")
return
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please select a numeric multi-class response variable!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric multi-class response variable!"
elif var_cat.loc[response_var] == "numeric" and df[response_var].dtypes == "float64":
response_var_message_num = "ERROR: Please select a multi-class response variable!"
elif var_cat.loc[response_var] == "binary":
response_var_message_num = "ERROR: Please select a multi-class response variable!"
elif var_cat.loc[response_var] == "numeric" and df[response_var].dtypes == "int64":
response_var_message_cat = "WARNING: Please check whether your response variable is indeed a multi-class variable!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
                        if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms
algorithms = ["Random Forest", "Artificial Neural Networks"]
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
st.markdown("**Model-specific settings**")
# Logistic Regression settings
# if any(a for a in sb_ML_alg if a == "Logistic Regression"):
# LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
# LR_intercept = "Yes"
# LR_cov_type = "non-robust"
# LR_finalPara["intercept"] = LR_intercept
# LR_finalPara["covType"] = LR_cov_type
# if st.checkbox("Adjust settings for Logistic Regression"):
# col1, col2 = st.beta_columns(2)
# with col1:
# LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
# with col2:
# LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
# LR_finalPara["intercept"] = LR_intercept
# LR_finalPara["covType"] = LR_cov_type
# st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
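                                        # Hidden layer sizes are stored as a tuple with one entry per layer, e.g. (100, 50) for two layers with 100 and 50 nodes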
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
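                                            # Tuning ranges use the rows "min"/"max"; parameters without a numeric range keep their candidate list in "min" and "NA" in "max"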
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
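                                            # Reuse the separator/decimal/encoding settings of the uploaded modelling data if available; otherwise try common separators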
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
                                                # Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
else:
st.info("All variables are available for predictions!")
# Check if NAs are present and delete them automatically
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show machine learning summary
if st.checkbox('Show a summary of machine learning settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- Models:", ', '.join(sb_ML_alg))
# if any(a for a in sb_ML_alg if a == "Logistic Regression"):
# st.write("- Logistic Regression including intercept: ", LR_intercept)
# st.write("- Logistic Regression covariance type: ", LR_cov_type)
if any(a for a in sb_ML_alg if a == "Random Forest") and do_hypTune == "No":
st.write("- Random Forest parameters: ")
st.write(rf_finalPara)
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks") and do_hypTune == "No":
st.write("- Artificial Neural Networks parameters: ")
st.write(ann_finalPara)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# Hyperparameter settings summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks" or a == "Boosted Regression Trees" or a == "Random Forest"):
st.write("Hyperparameter-tuning settings summary:")
if do_hypTune == "No":
st.write("- ", do_hypTune_no)
st.write("")
if do_hypTune == "Yes":
st.write("- Search method:", hypTune_method)
st.write("- ", hypTune_nCV, "-fold cross-validation")
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
st.write("- ", hypTune_iter, "iterations in search")
st.write("")
# Random Forest summary
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.write("Random Forest tuning settings summary:")
st.write(rf_tunePara)
# Artificial Neural Networks summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.write("Artificial Neural Networks tuning settings summary:")
st.write(ann_tunePara.style.format({"L² regularization": "{:.5}"}))
#st.caption("** Learning rate is only used in adam")
st.write("")
# General settings summary
st.write("General settings summary:")
st.write("- Response variable type: ", response_var_type)
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run models")
st.write("")
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
#Hyperparameter
if do_hypTune == "Yes":
# Tuning
model_tuning_results = ml.model_tuning(df, sb_ML_alg, hypTune_method, hypTune_iter, hypTune_nCV, hyPara_values, response_var_type, response_var, expl_var)
# Save final hyperparameters
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tuning_results = model_tuning_results["rf tuning"]
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [rf_tuning_results.loc["value"]["number of trees"]]
if [rf_tuning_results.loc["value"]["maximum tree depth"]][0] == "None":
rf_finalPara["maximum tree depth"] = None
else:
rf_finalPara["maximum tree depth"] = [rf_tuning_results.loc["value"]["maximum tree depth"]]
rf_finalPara["maximum number of features"] = [rf_tuning_results.loc["value"]["maximum number of features"]]
rf_finalPara["sample rate"] = [rf_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["rf"] = rf_finalPara
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tuning_results = model_tuning_results["ann tuning"]
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"]) #"learning rate schedule", "momentum", "epsilon"])
ann_finalPara["weight optimization solver"] = [ann_tuning_results.loc["value"]["weight optimization solver"]]
ann_finalPara["maximum number of iterations"] = [ann_tuning_results.loc["value"]["maximum number of iterations"]]
ann_finalPara["activation function"] = [ann_tuning_results.loc["value"]["activation function"]]
ann_finalPara["hidden layer sizes"] = [ann_tuning_results.loc["value"]["hidden layer sizes"]]
ann_finalPara["learning rate"] = [ann_tuning_results.loc["value"]["learning rate"]]
#ann_finalPara["learning rate schedule"] = [ann_tuning_results.loc["value"]["learning rate schedule"]]
#ann_finalPara["momentum"] = [ann_tuning_results.loc["value"]["momentum"]]
ann_finalPara["L² regularization"] = [ann_tuning_results.loc["value"]["L² regularization"]]
#ann_finalPara["epsilon"] = [ann_tuning_results.loc["value"]["epsilon"]]
final_hyPara_values["ann"] = ann_finalPara
# Model validation
if do_modval == "Yes":
model_val_results = ml.model_val(df, sb_ML_alg, MLR_model, train_frac, val_runs, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara, MLR_finalPara, LR_finalPara)
# Full model (depending on prediction for new data)
if do_modprednew == "Yes":
if new_data_pred is not None:
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
if do_modprednew == "No":
df_new = pd.DataFrame()
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
# Success message
st.success('Models run successfully!')
                else:
                    st.error("ERROR: No data available for modelling!")
#++++++++++++++++++++++
# ML OUTPUT
# Show only if models were run (no further widgets after run models or the full page reloads)
if run_models == True:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.beta_expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
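                        # Compute a binned 2D histogram for every pair of variables; the selector above filters which pair is displayed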
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Multi-class response variable
if response_var_type == "multi-class":
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
                                # Classification information
                                with fm_rf_reg_col1:
                                    st.write("Classification information:")
                                    st.table(model_full_results["RF information"].style.set_precision(user_precision))
                                # Classification statistics
                                with fm_rf_reg_col2:
                                    st.write("Classification statistics:")
rf_error_est = pd.DataFrame(index = ["ACC", "BAL ACC"], columns = ["Value"])
rf_error_est.loc["ACC"] = model_full_results["model comparison"].loc["ACC"]["Random Forest"]
rf_error_est.loc["BAL ACC"] = model_full_results["model comparison"].loc["BAL ACC"]["Random Forest"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat_mult")))
st.write("")
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp_mult")))
st.write("")
# Partial dependence plots
# st.write("Partial dependence plots:")
# fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
# for pd_var in expl_var:
# pd_data_rf = pd.DataFrame(columns = [pd_var])
# pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
# pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
# pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
# x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# tooltip = ["Partial dependence"] + [pd_var]
# )
# pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
# pd_data_ticks_rf[pd_var] = df[pd_var]
# pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
# pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
# x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
# y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# tooltip = [pd_var]
# )
# if expl_var.index(pd_var)%2 == 0:
# with fm_rf_figs3_col1:
# st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
# if expl_var.index(pd_var)%2 == 1:
# with fm_rf_figs3_col2:
# st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
# if sett_hints:
# st.info(str(fc.learning_hints("mod_md_RF_partDep_bin")))
# Confusion matrix
st.write("Confusion matrix (rows correspond to predictions):")
st.table(model_full_results["RF confusion"])
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_confu_mult")))
st.write("")
# Classification report
st.write("Classification report:")
st.table(model_full_results["RF classification report"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_classRep_mult")))
st.write("")
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="classification_information")
rf_error_est.to_excel(excel_file, sheet_name="classification_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
model_full_results["RF confusion"].to_excel(excel_file, sheet_name="confusion_matrix")
model_full_results["RF classification report"].to_excel(excel_file, sheet_name="classification_report")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
                                # Classification information
                                with fm_ann_reg_col1:
                                    st.write("Classification information:")
                                    st.table(model_full_results["ANN information"].style.set_precision(user_precision))
                                # Classification statistics
                                with fm_ann_reg_col2:
                                    st.write("Classification statistics:")
ann_error_est = pd.DataFrame(index = ["ACC", "BAL ACC", "Best loss"], columns = ["Value"])
ann_error_est.loc["ACC"] = model_full_results["model comparison"].loc["ACC"]["Artificial Neural Networks"]
ann_error_est.loc["BAL ACC"] = model_full_results["model comparison"].loc["BAL ACC"]["Artificial Neural Networks"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat_mult")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp_bin")))
st.write("")
# Partial dependence plots
# st.write("Partial dependence plots:")
# fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
# for pd_var in expl_var:
# pd_data_ann = pd.DataFrame(columns = [pd_var])
# pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
# pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
# pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
# x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# tooltip = ["Partial dependence"] + [pd_var]
# )
# pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
# pd_data_ticks_ann[pd_var] = df[pd_var]
# pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
# pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
# x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
# y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
# tooltip = [pd_var]
# )
# if expl_var.index(pd_var)%2 == 0:
# with fm_ann_figs2_col1:
# st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
# if expl_var.index(pd_var)%2 == 1:
# with fm_ann_figs2_col2:
# st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
# if sett_hints:
# st.info(str(fc.learning_hints("mod_md_ANN_partDep_bin")))
# Confusion matrix
st.write("Confusion matrix (rows correspond to predictions):")
st.table(model_full_results["ANN confusion"])
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_confu_mult")))
st.write("")
# Classification report
st.write("Classification report:")
st.table(model_full_results["ANN classification report"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_classRep_mult")))
st.write("")
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="classification_information")
ann_error_est.to_excel(excel_file, sheet_name="classification_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
model_full_results["ANN confusion"].to_excel(excel_file, sheet_name="confusion_matrix")
model_full_results["ANN classification report"].to_excel(excel_file, sheet_name="classification_report")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
if any(a for a in sb_ML_alg if a == "Random Forest" or a == "Artificial Neural Networks"):
st.markdown("**Model comparison**")
st.write((model_full_results["model comparison"]).transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modComp_mult")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["model comparison"].transpose().to_excel(excel_file, sheet_name="model_comparison")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# FULL MODEL PREDICTIONS
prediction_output = st.beta_expander("Full model predictions", expanded = False)
with prediction_output:
if model_full_results is not None:
#-------------------------------------------------------------
# Multi-class response variable
if response_var_type == "multi-class":
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
RF_pred_orig = pd.DataFrame(columns = [response_var])
RF_pred_orig[response_var] = model_full_results["RF fitted"]
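                                    # Append the class probabilities (one column per class) to the predicted labels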
RF_pred_orig = RF_pred_orig.join(pd.DataFrame(model_full_results["RF fitted proba"]))
st.write(RF_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
RF_pred_new = pd.DataFrame(columns = [response_var])
RF_pred_new[response_var] = model_full_results["RF prediction"]
RF_pred_new = RF_pred_new.join(pd.DataFrame(model_full_results["RF prediction proba"]))
st.write(RF_pred_new.style.set_precision(user_precision))
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
ANN_pred_orig = pd.DataFrame(columns = [response_var])
ANN_pred_orig[response_var] = model_full_results["ANN fitted"]
ANN_pred_orig = ANN_pred_orig.join(pd.DataFrame(model_full_results["ANN fitted proba"]))
st.write(ANN_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
ANN_pred_new = pd.DataFrame(columns = [response_var])
ANN_pred_new[response_var] = model_full_results["ANN prediction"]
ANN_pred_new = ANN_pred_new.join(pd.DataFrame(model_full_results["ANN prediction proba"]))
                                        st.write(ANN_pred_new.style.set_precision(user_precision))
st.write("")
#-------------------------------------------------------------
st.write("")
# Download links for prediction data
output = BytesIO()
predictions_excel = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Random Forest"):
RF_pred_orig.to_excel(predictions_excel, sheet_name="RF_pred_orig")
if do_modprednew == "Yes":
RF_pred_new.to_excel(predictions_excel, sheet_name="RF_pred_new")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ANN_pred_orig.to_excel(predictions_excel, sheet_name="ANN_pred_orig")
if do_modprednew == "Yes":
ANN_pred_new.to_excel(predictions_excel, sheet_name="ANN_pred_new")
predictions_excel.save()
predictions_excel = output.getvalue()
b64 = base64.b64encode(predictions_excel)
dl_file_name = "Full model predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/predictions_excel;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download full model predictions</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# VALIDATION OUTPUT
if do_modval == "Yes":
val_output = st.beta_expander("Validation output", expanded = False)
with val_output:
if model_val_results is not None:
#------------------------------------
# Multi-class response variable
if response_var_type == "multi-class":
# Metric
col1, col2 = st.beta_columns(2)
with col1:
if model_val_results["mean"].empty:
st.write("")
else:
st.write("Means across validation runs:")
st.write(model_val_results["mean"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means_mult")))
st.write("")
with col2:
if model_val_results["sd"].empty:
st.write("")
else:
st.write("SDs across validation runs:")
st.write(model_val_results["sd"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds_mult")))
st.write("")
val_col1, val_col2 = st.beta_columns(2)
with val_col1:
# ACC boxplot
if model_val_results["ACC"].empty:
st.write("")
else:
st.write("Boxplot of ACC across validation runs:")
acc_results = model_val_results["ACC"]
acc_bplot = pd.melt(acc_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
acc_boxchart = alt.Chart(acc_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "ACC", scale = alt.Scale(domain = [min(acc_bplot["Value"]), max(acc_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(acc_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_ACCBoxplot")))
with val_col2:
# BAL ACC boxplot
if model_val_results["BAL ACC"].empty:
st.write("")
else:
st.write("Boxplot of BAL ACC across validation runs:")
bal_acc_results = model_val_results["BAL ACC"]
bal_acc_bplot = pd.melt(bal_acc_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
bal_acc_boxchart = alt.Chart(bal_acc_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "BAL ACC", scale = alt.Scale(domain = [min(bal_acc_bplot["Value"]), max(bal_acc_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(bal_acc_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_BALACCBoxplot")))
st.write("")
# Variable importance
st.write("Means of variable importances:")
varImp_table_mean = model_val_results["variable importance mean"]
st.write(varImp_table_mean.style.set_precision(user_precision))
st.write("SDs of variable importances:")
varImp_table_sd = model_val_results["variable importance sd"]
st.write(varImp_table_sd.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_varImp_mult")))
st.write("")
st.write("")
# Download link for validation output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_val_results["mean"].transpose().to_excel(excel_file, sheet_name="metrics_mean")
model_val_results["sd"].transpose().to_excel(excel_file, sheet_name="metrics_sd")
varImp_table_mean.to_excel(excel_file, sheet_name="variable_importance_mean")
varImp_table_sd.to_excel(excel_file, sheet_name="variable_importance_sd")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Validation output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download validation output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER-TUNING OUTPUT
            if any(a in sb_ML_alg for a in ["Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]):
                if do_hypTune == "Yes":
                    hype_title = "Hyperparameter-tuning output"
                else:
                    hype_title = "Hyperparameter output"
hype_output = st.beta_expander(hype_title, expanded = False)
with hype_output:
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
# Final hyperparameters
if rf_finalPara is not None:
st.write("Final hyperparameters:")
st.table(rf_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if rf_tuning_results is not None and rf_finalPara is not None:
st.write("Tuning details:")
rf_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
rf_finalTuneMetrics["scoring metric"] = [rf_tuning_results.loc["value"]["scoring"]]
rf_finalTuneMetrics["number of models"] = [rf_tuning_results.loc["value"]["number of models"]]
rf_finalTuneMetrics["mean cv score"] = [rf_tuning_results.loc["value"]["mean score"]]
rf_finalTuneMetrics["standard deviation cv score"] = [rf_tuning_results.loc["value"]["std score"]]
rf_finalTuneMetrics["test data score"] = [rf_tuning_results.loc["value"]["test score"]]
st.table(rf_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_details")))
st.write("")
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
# Final hyperparameters
if brt_finalPara is not None:
st.write("Final hyperparameters:")
st.table(brt_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if brt_tuning_results is not None and brt_finalPara is not None:
st.write("Tuning details:")
brt_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
brt_finalTuneMetrics["scoring metric"] = [brt_tuning_results.loc["value"]["scoring"]]
brt_finalTuneMetrics["number of models"] = [brt_tuning_results.loc["value"]["number of models"]]
brt_finalTuneMetrics["mean cv score"] = [brt_tuning_results.loc["value"]["mean score"]]
brt_finalTuneMetrics["standard deviation cv score"] = [brt_tuning_results.loc["value"]["std score"]]
brt_finalTuneMetrics["test data score"] = [brt_tuning_results.loc["value"]["test score"]]
st.table(brt_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_details")))
st.write("")
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
# Final hyperparameters
if ann_finalPara is not None:
st.write("Final hyperparameters:")
st.table(ann_finalPara.transpose().style.format({"L² regularization": "{:.5}"}))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if ann_tuning_results is not None and ann_finalPara is not None:
st.write("Tuning details:")
ann_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
ann_finalTuneMetrics["scoring metric"] = [ann_tuning_results.loc["value"]["scoring"]]
ann_finalTuneMetrics["number of models"] = [ann_tuning_results.loc["value"]["number of models"]]
ann_finalTuneMetrics["mean cv score"] = [ann_tuning_results.loc["value"]["mean score"]]
ann_finalTuneMetrics["standard deviation cv score"] = [ann_tuning_results.loc["value"]["std score"]]
ann_finalTuneMetrics["test data score"] = [ann_tuning_results.loc["value"]["test score"]]
st.table(ann_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_details")))
st.write("")
# Download link for hyperparameter output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara.to_excel(excel_file, sheet_name="RF_final_hyperparameters")
if do_hypTune == "Yes":
rf_finalTuneMetrics.to_excel(excel_file, sheet_name="RF_tuning_details")
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara.to_excel(excel_file, sheet_name="BRT_final_hyperparameters")
if do_hypTune == "Yes":
brt_finalTuneMetrics.to_excel(excel_file, sheet_name="BRT_tuning_details")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara.to_excel(excel_file, sheet_name="ANN_final_hyperparameters")
if do_hypTune == "Yes":
ann_finalTuneMetrics.to_excel(excel_file, sheet_name="ANN_tuning_details")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
if do_hypTune == "Yes":
dl_file_name = "Hyperparameter-tuning output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter-tuning output</a>
""",
unsafe_allow_html=True)
if do_hypTune != "Yes":
dl_file_name = "Hyperparameter output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter output</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# DATA DECOMPOSITION
if analysis_type == "Data decomposition":
#++++++++++++++++++++++++++++++++++++++++++++
# DIMENSIONALITY REDUCTION
st.write("")
st.write("")
data_decomposition_container = st.beta_container()
with data_decomposition_container:
st.header("**Data decomposition**")
st.markdown("STATY will take care of the decomposition for you, so you can put your focus on results interpretation and communication! ")
dd_settings = st.beta_expander("Specify method", expanded = False)
with dd_settings:
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Select variables
var_options = list(df.select_dtypes(['number']).columns)
if len(var_options)>0:
decomp_var = st.multiselect("Select variables for decomposition", var_options, var_options, key = session_state.id)
else:
st.error("ERROR: No numeric variables in dataset!")
return
# Include response variable in output?
resp_var_dec = st.selectbox("Include response variable in transformed data output", ["No", "Yes"], key = session_state.id)
if resp_var_dec == "Yes":
resp_var_options = df.columns
resp_var_options = resp_var_options[resp_var_options.isin(df.drop(decomp_var, axis = 1).columns)]
resp_var = st.selectbox("Select response variable for transformed data output", resp_var_options, key = session_state.id)
# Filter data according to selected variables
if len(decomp_var) < 2:
st.error("ERROR: Please select more than 1 variable!")
return
else:
# Decomposition data set (and response variable)
if resp_var_dec == "Yes":
df = df[list([resp_var]) + list(decomp_var)]
else:
df = df[decomp_var]
# Check if NAs are present and delete them automatically
if np.where(df[decomp_var].isnull())[0].size > 0:
st.warning("WARNING: Your data set includes NAs. Rows with NAs are automatically deleted!")
df = df.dropna()
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify algorithm**")
DEC_alg = st.selectbox("Select decomposition technique", ["Principal Component Analysis", "Factor Analysis"])
if DEC_alg == "Factor Analysis":
                        # Default settings
nfactors = len(decomp_var)
farotation = None
famethod = "ml"
# Adjust settings
if st.checkbox("Adjust Factor Analysis settings"):
col1, col2 = st.beta_columns(2)
with col1:
nfactors = st.number_input("Number of factors", min_value=2, max_value=len(decomp_var), value=len(decomp_var))
farotation = st.selectbox("Rotation", [None, "varimax", "promax", "oblimin", "oblimax", "quartimin", "quartimax", "equamax"])
if farotation == "None":
farotation = None
with col2:
famethod = st.selectbox("Fitting method", ["Maximum Likelihood", "MINRES", "Principal Factor"])
if famethod == "Maximum Likelihood":
famethod = "ml"
elif famethod == "MINRES":
famethod = "minres"
elif famethod == "Principal Factor":
famethod = "principal"
#--------------------------------------------------------------------------------------
# DECOMPOSITION DATA
st.write("")
# Show data
if resp_var_dec == "Yes":
show_data_text = "Show data for response variable and decomposition"
else:
show_data_text = "Show data for decomposition"
if st.checkbox(show_data_text):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for decomposition data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="decomposition_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Decomposition data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download decomposition data</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# RUN DECOMPOSITION
# Decomposition is run on button click
st.write("")
run_decomposition = st.button("Run decomposition")
st.write("")
if run_decomposition:
decomp_results = {}
# Check algorithm
# Principal Component Analysis
if DEC_alg == "Principal Component Analysis":
# Standardize data
X_pca = df[decomp_var]
scaler = StandardScaler()
scaler.fit(X_pca)
X_pca = scaler.transform(X_pca)
# Create components names
components_names = []
for i in range(len(decomp_var)):
components_names.append("pc_" + str(i+1))
# Adjust index and column names
X_pca = pd.DataFrame(X_pca, index = df[decomp_var].index, columns = df[decomp_var].columns)
# Fit PCA
pca = decomposition.PCA()
pca.fit(X_pca)
# Transform data
X_pca_transform = pca.transform(X_pca)
# Adjust index and column names
X_pca_transform = pd.DataFrame(X_pca_transform, index = df[decomp_var].index, columns = components_names)
# Add response variable if wanted
if resp_var_dec == "Yes":
X_pca_transform[resp_var] = df[resp_var]
# Save results
EVEV = pd.DataFrame(pca.explained_variance_, index = components_names, columns = ["eigenvalue"])
EVEV["explained variance ratio"] = pca.explained_variance_ratio_
EVEV["cumulative explained variance"] = np.cumsum(pca.explained_variance_ratio_)
decomp_results["transformed data"] = X_pca_transform
decomp_results["eigenvalues and explained variance"] = EVEV
decomp_results["eigenvectors"] = pd.DataFrame(pca.components_.T, index = decomp_var, columns = components_names)
# Factor Analysis
if DEC_alg == "Factor Analysis":
# Standardize data
X_fa = df[decomp_var]
scaler = StandardScaler()
scaler.fit(X_fa)
X_fa = scaler.transform(X_fa)
# Create components names
components_names1 = []
for i in range(len(decomp_var)):
components_names1.append("factor_" + str(i+1))
components_names2 = []
for i in range(nfactors):
components_names2.append("factor_" + str(i+1))
# Adjust index and column names
X_fa = pd.DataFrame(X_fa, index = df[decomp_var].index, columns = df[decomp_var].columns)
# Fit FA
fa = FactorAnalyzer(n_factors=nfactors, rotation= farotation, method= famethod)
fa.fit(X_fa)
# Transform data
X_fa_transform = fa.transform(X_fa)
# Adjust index and column names
X_fa_transform = pd.DataFrame(X_fa_transform, index = df[decomp_var].index, columns = components_names2)
# Add response variable if wanted
if resp_var_dec == "Yes":
X_fa_transform[resp_var] = df[resp_var]
# Save results
BST = pd.DataFrame(index = ["statistic", "dof", "p-value"], columns = ["value"])
BST.loc["statistic"] = -np.log(np.linalg.det(X_fa.corr()))* (X_fa.shape[0] - 1 - (2 * X_fa.shape[1] + 5) / 6)
BST.loc["dof"] = X_fa.shape[1] * (X_fa.shape[1] - 1) / 2
BST.loc["p-value"] = stats.chi2.sf(BST.loc["statistic"][0], BST.loc["dof"][0])
KMO = pd.DataFrame(calculate_kmo(X_fa)[1], index = ["KMO"], columns = ["value"])
EV = pd.DataFrame(fa.get_eigenvalues()[0], index = components_names1, columns = ["eigenvalue"])
EV["explained variance ratio"] = EV["eigenvalue"]/sum(EV["eigenvalue"])
EV["cumulative explained variance"] = np.cumsum(EV["explained variance ratio"])
EV["common factor eigenvalue"] = fa.get_eigenvalues()[1]
LEV = pd.DataFrame(fa.get_factor_variance()[0], index = components_names2, columns = ["SS loadings"])
LEV["explained variance ratio"] = fa.get_factor_variance()[1]
LEV["cumulative explained variance"] = fa.get_factor_variance()[2]
CU = pd.DataFrame(fa.get_communalities(), index = df[decomp_var].columns, columns = ["communality"])
CU["uniqueness"] = fa.get_uniquenesses()
decomp_results["transformed data"] = X_fa_transform
decomp_results["BST"] = BST
decomp_results["KMO"] = KMO
decomp_results["eigenvalues"] = EV
decomp_results["loadings and explained variance"] = LEV
decomp_results["communalities and uniqueness"] = CU
decomp_results["loadings"] = pd.DataFrame(fa.loadings_, index = df[decomp_var].columns, columns = components_names2)
else:
st.error("ERROR: No data available for Modelling!")
return
#----------------------------------------------------------------------------------------------
if run_decomposition:
st.write("")
st.write("")
st.header("**Decomposition outputs**")
if DEC_alg == "Principal Component Analysis":
expander_text = "Principal Component Analysis results"
if DEC_alg == "Factor Analysis":
expander_text = "Factor Analysis results"
decomp_res1 = st.beta_expander(expander_text, expanded = False)
with decomp_res1:
corr_matrix = df[decomp_var].corr()
if len(decomp_var) <= 10:
st.write("Correlation Matrix & 2D-Histogram")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': decomp_var[0], 'variable2': decomp_var[0]})
# Calculate correlation data
corr_data = df[decomp_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
# Correlation values to insert
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[decomp_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("decomp_cor")))
st.write("")
else:
st.write("Correlation matrix:")
st.write(corr_matrix.style.set_precision(user_precision))
st.write("")
# Principal Component Analysis Output
if DEC_alg == "Principal Component Analysis":
st.write("Eigenvalues and explained variance:")
st.table(decomp_results["eigenvalues and explained variance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_pca_eigval")))
st.write("")
st.write("Eigenvectors:")
st.table(decomp_results["eigenvectors"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_pca_eigvec")))
st.write("")
# Scree plot
st.write("Scree plot:")
scree_plot_data = decomp_results["eigenvalues and explained variance"].copy()
scree_plot_data["component"] = decomp_results["eigenvalues and explained variance"].index
scree_plot1 = alt.Chart(scree_plot_data, height = 200).mark_line(point = True).encode(
x = alt.X("component", sort = list(scree_plot_data["component"]), title = "princial component", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("explained variance ratio", title = "proportion of variance", scale = alt.Scale(domain = [0, 1]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["cumulative explained variance", "explained variance ratio", "component",]
)
scree_plot2 = alt.Chart(scree_plot_data, height = 200).mark_line(color = "darkred", point = True).encode(
x = alt.X("component", sort = list(scree_plot_data["component"]), title = "princial component", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("cumulative explained variance", title = "proportion of variance", scale = alt.Scale(domain = [0, 1]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["cumulative explained variance", "explained variance ratio", "component",]
)
st.altair_chart(scree_plot1 + scree_plot2, use_container_width = True)
st.write("")
# 2D principal component plot
if resp_var_dec == "Yes":
st.write("2D principal component plot:")
pc_plot_data = decomp_results["transformed data"].copy()
pc_plot_data["index"] = decomp_results["transformed data"].index
pc_plot = alt.Chart(pc_plot_data, height = 200).mark_circle(point = True).encode(
x = alt.X("pc_1", title = "princial component 1", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("pc_2", title = "princial component 2", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
color = resp_var,
tooltip = ["pc_1", "pc_2", "index",]
)
st.altair_chart(pc_plot, use_container_width = True)
st.write("")
else:
st.write("2D principal component plot:")
pc_plot_data = decomp_results["transformed data"].copy()
pc_plot_data["index"] = decomp_results["transformed data"].index
pc_plot = alt.Chart(pc_plot_data, height = 200).mark_circle(point = True).encode(
x = alt.X("pc_1", title = "princial component 1", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("pc_2", title = "princial component 2", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["pc_1", "pc_2", "index",]
)
st.altair_chart(pc_plot, use_container_width = True)
st.write("")
# Download link for decomposition results
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
corr_matrix.to_excel(excel_file, sheet_name="correlation_matrix")
decomp_results["eigenvalues and explained variance"].to_excel(excel_file, sheet_name="eigval_and_explained_variance")
decomp_results["eigenvectors"].to_excel(excel_file, sheet_name="eigenvectors")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Decomposition output__PCA__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download decomposition output</a>
""",
unsafe_allow_html=True)
st.write("")
# Factor Analysis
if DEC_alg == "Factor Analysis":
col1, col2 = st.beta_columns(2)
with col1:
st.write("Bartlett's Sphericity test:")
st.table(decomp_results["BST"].style.set_precision(user_precision))
with col2:
st.write("Kaiser-Meyer-Olkin criterion:")
st.table(decomp_results["KMO"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_fa_adeqtests")))
st.write("")
st.write("Eigenvalues:")
st.table(decomp_results["eigenvalues"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_fa_eigval")))
st.write("")
st.write("Explained variance:")
st.table(decomp_results["loadings and explained variance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_fa_explvar")))
st.write("")
st.write("Communalities and uniquenesses:")
st.table(decomp_results["communalities and uniqueness"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_fa_comuniq")))
st.write("")
st.write("Loadings:")
st.table(decomp_results["loadings"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("decomp_fa_loadings")))
st.write("")
# Scree plot
st.write("Scree plot:")
scree_plot_data = decomp_results["eigenvalues"].copy()
scree_plot_data["component"] = [str(i+1) for i in range(len(decomp_var))]
scree_plot1 = alt.Chart(scree_plot_data, height = 200).mark_line(point = True).encode(
x = alt.X("component", sort = list(scree_plot_data["component"]), title = "component", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11, labelAngle = 0)),
y = alt.Y("eigenvalue", title = "eigenvalue", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["eigenvalue", "component",]
)
Kaiser_criterion = alt.Chart(pd.DataFrame({'y': [1]}), height = 200).mark_rule(size = 2, color = "darkred").encode(y='y')
st.altair_chart(scree_plot1+ Kaiser_criterion, use_container_width = True)
#if sett_hints:
#st.info(str(fc.learning_hints("mod_md_BRT_thresAUC")))
st.write("")
# 2D factor loadings plot
if nfactors >= 2:
st.write("2D factor loadings plot:")
comp_plot_data = decomp_results["loadings"].copy()
comp_plot_data["variable"] = decomp_results["loadings"].index
comp_plot = alt.Chart(comp_plot_data, height = 200).mark_circle(point = True).encode(
x = alt.X("factor_1", title = "factor 1", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("factor_2", title = "factor 2", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["factor_1", "factor_2", "variable",]
)
yaxis = alt.Chart(pd.DataFrame({'y': [0]}), height = 200).mark_rule(size = 2, color = "darkred").encode(y='y')
xaxis = alt.Chart(pd.DataFrame({'x': [0]}), height = 200).mark_rule(size = 2, color = "darkred").encode(x='x')
st.altair_chart(comp_plot + yaxis + xaxis, use_container_width = True)
st.write("")
# Download link for decomposition results
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
corr_matrix.to_excel(excel_file, sheet_name="correlation_matrix")
decomp_results["BST"].to_excel(excel_file, sheet_name="Bartlett's_sphericity_test")
decomp_results["KMO"].to_excel(excel_file, sheet_name="Kaiser-Meyer-Olkin_criterion")
decomp_results["eigenvalues"].to_excel(excel_file, sheet_name="eigenvalues")
decomp_results["loadings and explained variance"].to_excel(excel_file, sheet_name="explained_variance")
decomp_results["communalities and uniqueness"].to_excel(excel_file, sheet_name="communalities_uniqueness")
decomp_results["loadings"].to_excel(excel_file, sheet_name="loadings")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
if farotation is not None:
dl_file_name= "Decomposition output__FA(" + str(famethod) + ", " + str(farotation) + ")__" + df_name + ".xlsx"
else:
dl_file_name= "Decomposition output__FA(" + str(famethod) + ")__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download decomposition output</a>
""",
unsafe_allow_html=True)
st.write("")
decomp_res2 = st.beta_expander("Transformed data", expanded = False)
with decomp_res2:
# Principal Component Analysis Output
if DEC_alg == "Principal Component Analysis":
st.write(decomp_results["transformed data"].style.set_precision(user_precision))
st.write("Data shape: ", decomp_results["transformed data"].shape[0], " rows and ", decomp_results["transformed data"].shape[1], " columns")
# Download link for transformed data
output = BytesIO()
excel_file = | pd.ExcelWriter(output, engine="xlsxwriter") | pandas.ExcelWriter |
from qualipy.exceptions import InvalidColumn
import pandas as pd
import numpy as np
import os
import types
import json
from typing import Any, Dict, Callable, Optional, Union
import importlib
def get_column(data: pd.DataFrame, name: str) -> pd.Series:
if name == "index":
return data.index
try:
return data[name]
except KeyError:
raise InvalidColumn("Column {} is not part of the dataset".format(name))
HOME = os.path.expanduser("~")
def set_metric_id(data):
data["metric_id"] = (
data.column_name
+ "_"
+ data.metric.astype(str)
+ "_"
# + np.where(
# data.arguments.isnull(),
# "",
# data.arguments.astype(str).str.replace(" ", "").str.replace("'", ""),
# )
)
return data
def set_value_type(data: pd.DataFrame) -> pd.DataFrame:
    return_format = data.return_format.values[0]
    data_type = data.type.values[0]
    if return_format == "bool":
        data.value = data.value.map(
            {"True": True, "False": False, "true": True, "false": False}
        )
    elif return_format == "dict" and data_type != "numerical":
        data.value = data.value.apply(lambda v: json.loads(v))
    elif return_format == "dict" and data_type == "numerical":
        data.value = data.value.astype(float)
    else:
        data.value = data.value.astype(return_format)
return data
def copy_func(f: Callable, name: Optional[str] = None) -> Callable:
fn = types.FunctionType(
f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__
)
fn.__dict__.update(f.__dict__)
return fn
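# Note (added comment): copying the function object means that the attributes attached in
# copy_function_spec below (arguments, key_function, valid ranges) live on a fresh function,
# so reusing the same underlying metric in several specs does not leak parameters between them.
# Illustrative sketch (some_metric is a hypothetical metric function, not part of this module):
#
#   spec = {"function": some_metric, "parameters": {"column": "a"}}
#   fn = copy_function_spec(spec)
#   fn.arguments  # -> {"column": "a"}, while some_metric itself is left untouched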
def copy_function_spec(function: Union[Dict[str, Any], Callable]):
if isinstance(function, dict):
copied_function = copy_func(function["function"])
copied_function.arguments = function.get("parameters", {})
copied_function.key_function = function.get("key", False)
copied_function.valid_min_range = function.get("valid_min")
copied_function.valid_max_range = function.get("valid_max")
elif isinstance(function, str):
copied_function = copy_func(import_function_by_name_full_path(function))
copied_function.arguments = {}
copied_function.key_function = False
else:
copied_function = copy_func(function)
copied_function.arguments = {}
copied_function.key_function = False
return copied_function
def import_function_by_name(name: str, backend: str) -> Callable:
module = importlib.import_module(f"qualipy.backends.{backend}_backend.functions")
return getattr(module, name)
def import_function_by_name_full_path(name: str) -> Callable:
name = name.split(".")
module_name = ".".join(name[:-1])
function_name = name[-1]
module = importlib.import_module(module_name)
return getattr(module, function_name)
def get_latest_insert_only(data, floor_datetime=False):
group_name = "batch_name"
if floor_datetime:
data["floored_datetime"] = data.date.dt.floor("T")
group_name = "floored_datetime"
data = (
data.groupby(group_name, as_index=False)
.apply(lambda g: g[g.insert_time == g.insert_time.max()])
.reset_index(drop=True)
)
if "floored_datetime" in data.columns:
data = data.drop("floored_datetime", axis=1)
return data
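# Note (added comment): for each batch_name (or for each per-minute floored timestamp when
# floor_datetime=True), only the rows carrying the most recent insert_time are kept, so
# re-inserted batches replace their earlier versions in downstream reports.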
def get_project_data(
project, timezone=None, latest_insert_only=False, floor_datetime=False
):
timezone = "UTC" if timezone is None else timezone
data = project.get_project_table()
try:
data.date = pd.to_datetime(data.date).dt.tz_convert(timezone)
except TypeError:
data.date = | pd.to_datetime(data.date) | pandas.to_datetime |
import textwrap
import warnings
from functools import reduce, partial
import numpy as np
import pandas as pd
import torch
from hdxrate import k_int_from_sequence
from numpy.lib.recfunctions import append_fields
from scipy import constants
import pyhdx
from pyhdx.alignment import align_dataframes
from pyhdx.fileIO import dataframe_to_file
from pyhdx.support import reduce_inter, fields_view
from pyhdx.config import cfg
def protein_wrapper(func, *args, **kwargs):
metadata = kwargs.pop('metadata', {})
[metadata.update(arg.metadata) for arg in args if isinstance(arg, Protein)]
df_args = [arg.df if isinstance(arg, Protein) else arg for arg in args]
return_value = func(*df_args, **kwargs)
if isinstance(return_value, pd.DataFrame):
return Protein(return_value, **metadata)
return return_value
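# Note (added comment): protein_wrapper is what lets Protein delegate arbitrary DataFrame
# methods (see Protein.__getattr__ below): Protein arguments are unwrapped to their underlying
# DataFrames, the call is forwarded, and DataFrame results are re-wrapped into a Protein that
# carries the combined metadata. A minimal sketch of the effect:
#
#   p = Protein({'r_number': [1, 2, 3], 'value': [0.1, 0.2, 0.3]}, index='r_number')
#   q = p.rename(columns={'value': 'deltaG'})  # pandas method, but q is again a Protein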
class Protein(object):
"""Object describing a protein
    Protein objects are based on pandas DataFrames, with added functionality
Parameters
----------
data : :class:`~numpy.ndarray` or :obj:`dict` or :class:`~pandas.DataFrame`
data object to initiate the protein object from
index : :obj:`str`, optional
Name of the column with the residue number (index column)
**metadata
Dictionary of optional metadata.
"""
def __init__(self, data, index=None, **metadata):
self.metadata = metadata
if isinstance(data, dict) or isinstance(data, np.ndarray):
self.df = pd.DataFrame(data)
self.df.set_index(index, inplace=True)
elif isinstance(data, pd.DataFrame):
self.df = data.copy()
if not self.df.index.is_integer():
raise ValueError(f"Invalid index type {type(self.df.index)} for supplied DataFrame, must be integer index")
if not self.df.index.is_unique:
raise ValueError("Protein dataframe indices must be unique")
new_index = pd.RangeIndex(start=self.df.index.min(), stop=self.df.index.max() + 1, name='r_number')
self.df = self.df.reindex(new_index)
def __str__(self):
s = self.df.__str__()
try:
full_s = f"Protein {self.metadata['name']}\n" + s
return full_s
except KeyError:
return s
def __len__(self):
return len(self.df)
def __getattr__(self, item):
attr = getattr(self.df, item)
if callable(attr):
return partial(protein_wrapper, attr, metadata=self.metadata)
else:
return attr
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def _make_protein(self, df_out, other):
"""Make a new :class:`~pyhdx.models.Protein` object and combine metadata with other metadata"""
metadata = {**self.metadata, **other.metadata}
protein_out = Protein(df_out, index=df_out.index.name, **metadata)
return protein_out
def to_file(self, file_path, include_version=True, include_metadata=True, fmt='csv', **kwargs):
"""
Write Protein data to file.
Parameters
----------
file_path : :obj:`str`
File path to create and write to.
include_version : :obj:`bool`
Set ``True`` to include PyHDX version and current time/date
fmt : :obj:`str`
Formatting to use, options are 'csv' or 'pprint'
include_metadata : :obj:`bool`
If `True`, the objects' metadata is included
**kwargs : :obj:`dict`, optional
Optional additional keyword arguments passed to `df.to_csv`
Returns
-------
None
"""
metadata = self.metadata if include_metadata else include_metadata
dataframe_to_file(file_path, self.df, include_version=include_version, include_metadata=metadata, fmt=fmt, **kwargs)
def set_k_int(self, temperature, pH):
"""
        Calculates the intrinsic exchange rates of the sequence. Residues with no coverage or prolines are assigned a value of -1.
        The rates are calculated for the first residue (1) up to the last residue that is covered by peptides.
        When the previous residue is unknown, the current residue is also assigned a value of -1.
Parameters
----------
temperature : :obj:`float`
Temperature of the labelling reaction (Kelvin)
pH : :obj:`float`
pH of the labelling reaction
Returns
-------
k_int : :class:`~numpy.ndarray`
            Array of intrinsic exchange rates
"""
if 'sequence' not in self:
raise ValueError('No sequence data available to calculate intrinsic exchange rates.')
sequence = list(self['sequence']) # Includes 'X' padding at cterm if cterm > last peptide
k_int = k_int_from_sequence(sequence, temperature, pH)
self.df['k_int'] = k_int
return np.array(k_int)
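    # Illustrative usage (added; assumes the 'sequence' column is populated and temperature is in Kelvin):
    #   protein.set_k_int(temperature=300., pH=8.)
    #   protein['k_int']  # per-residue intrinsic exchange rates, -1 for prolines / no coverage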
@property
def c_term(self):
return self.df.index.max()
@property
def n_term(self):
return self.df.index.min()
def __getitem__(self, item):
return self.df.__getitem__(item)
def __setitem__(self, index, value):
self.df.__setitem__(index, value)
def __contains__(self, item):
return self.df.__contains__(item)
def __sub__(self, other):
return protein_wrapper(self.df.subtract, other, metadata=self.metadata)
def __add__(self, other):
return protein_wrapper(self.df.add, other, metadata=self.metadata)
def __truediv__(self, other):
return protein_wrapper(self.df.truediv, other, metadata=self.metadata)
def __floordiv__(self, other):
return protein_wrapper(self.df.floordiv, other, metadata=self.metadata)
def __mul__(self, other):
return protein_wrapper(self.df.mul, other, metadata=self.metadata)
class PeptideMasterTable(object):
"""
    Main peptide input object. The input pandas DataFrame `data` must have the following entries for each peptide:
start: Residue number of the first amino acid in the peptide
end: Residue number of the last amino acid in the peptide (inclusive)
sequence: Amino acid sequence of the peptide (one letter code)
exposure: Typically the time the sample was exposed to a deuterated solution. This can correspond to other times if
the kinetics of the experiment are set up differently
state: String describing to which state (experimental conditions) the peptide belongs
uptake: Number of deuteriums the peptide has taken up
The following fields are added to the `data` array upon initialization:
_start: Unmodified copy of initial start field
_end: Unmodified copy of initial end field
_sequence: Unmodified copy of initial sequence
ex_residues: Number of residues that undergo deuterium exchange. This number is calculated using the `drop_first` and
`ignore_prolines` parameters
N-terminal residues which are removed because they are either within `drop_first` or they are N-terminal prolines are
marked with 'x' in the `sequence` field. Prolines which are removed because they are in the middle of a peptide are
marked with a lower case 'p' in the sequence field.
The field `scores` is used in calculating exchange rates and can be set by either the `set_backexchange` or
`set_control` methods.
Parameters
----------
data : :class:`~pandas.DataFrame`
Pandas DataFrame with peptide entries.
drop_first : :obj:`int`
Number of N-terminal amino acids to ignore. Default is 1.
d_percentage : :obj:`float`
Percentage of deuterium in the labelling solution.
ignore_prolines : :obj:`bool`
Boolean to toggle ignoring of proline residues. When True these residues are treated as if they're not present
in the protein.
sort : :obj:`bool`
        Set to ``True`` to sort the input. Sort order is 'start', 'end', 'sequence', 'state', 'exposure'.
remove_nan : :obj:`bool`
Set to ``True`` to remove NaN entries in uptake
"""
def __init__(self, data, drop_first=1, ignore_prolines=True, d_percentage=100., sort=True, remove_nan=True):
assert np.all(data['start'] < data['end']), 'All `start` entries must be smaller than their `end` entries'
assert 0 <= d_percentage <= 100., 'Deuteration percentage must be between 0 and 100'
d_percentage /= 100.
self.data = data.copy().reset_index(drop=True)
self.data.index.name = 'peptide_index'
if remove_nan:
self.data = self.data.dropna(subset=['uptake'])
if sort:
self.data = self.data.sort_values(['start', 'end', 'sequence', 'state', 'exposure'])
for col in ['start', 'end', 'sequence']:
target = '_' + col
if target in self.data:
continue
else:
self.data[target] = self.data[col]
# Convert sequence to upper case if not so already
self.data['sequence'] = self.data['sequence'].str.upper()
# Mark ignored prolines with lower case letters
if ignore_prolines:
self.data['sequence'] = [s.replace('P', 'p') for s in self.data['sequence']]
# Find the total number of n terminal / c_terminal residues to remove
# Todo: edge cases such as pure prolines or overlap between c terminal prolines and drop_first section (issue 32)
n_term = np.array([len(seq) - len(seq[drop_first:].lstrip('p')) for seq in self.data['sequence']])
c_term = np.array([len(seq) - len(seq.rstrip('p')) for seq in self.data['sequence']])
# Mark removed n terminal residues with lower case x
self.data['sequence'] = ['x'*nt + s[nt:] for nt, s in zip(n_term, self.data['sequence'])]
self.data['start'] += n_term
self.data['end'] -= c_term
ex_residues = np.array([len(s) - s.count('x') - s.count('p') for s in self.data['sequence']]) * d_percentage
if 'ex_residues' not in self.data:
self.data['ex_residues'] = ex_residues
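    # Illustrative usage (added sketch; file name, state name and exposure values are hypothetical):
    #   df = pd.read_csv("peptides.csv")  # must contain start, end, sequence, exposure, state, uptake
    #   pmt = PeptideMasterTable(df, drop_first=1, ignore_prolines=True, d_percentage=90.)
    #   pmt.set_control(("Full deuteration control", 0.167))  # adds 'rfu' and 'uptake_corrected'
    #   state_data = pmt.get_state("ProteinX apo")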
def __len__(self):
return self.data.shape[0]
def get_state(self, state):
"""
        Returns entries in the table with state 'state'.
        Rows with NaN entries for 'uptake_corrected' are removed.

        Parameters
        ----------
        state : :obj:`str`
            Name of the state to select.

        Returns
        -------
        output_data : :class:`~pandas.DataFrame`
            DataFrame with all peptides belonging to the requested state.
"""
if not isinstance(state, str):
raise TypeError(f'State must be type `str`, got {type(state)}')
data = self.data.query(f'state == "{state}"').copy()
if 'uptake_corrected' in data.columns:
data.dropna(subset=['uptake_corrected'], inplace=True)
return data
def set_backexchange(self, back_exchange):
"""
Sets the normalized percentage of uptake through a fixed backexchange value for all peptides.
Parameters
----------
back_exchange : :obj:`float`
Percentage of back exchange
"""
back_exchange /= 100
rfu = self.data['uptake'] / ((1-back_exchange)*self.data['ex_residues'])
uptake_corrected = self.data['uptake'] / (1 - back_exchange)
self.data = append_fields(self.data, ['rfu', 'uptake_corrected'], data=[rfu, uptake_corrected], usemask=False)
def set_control(self, control_1, control_0=None):
"""
        Apply a control dataset to this object. The column 'rfu' is added to the object by normalizing its uptake
        value with respect to the control uptake value to one.
        Optionally, ``control_0`` can be specified, which is a dataset whose uptake value will be used to zero
        the uptake.
        Nonmatching peptides are set to NaN.
        In terms of the implementation below: rfu = (uptake - uptake_ND) / (uptake_FD - uptake_ND),
        and uptake_corrected = rfu * ex_residues.
Parameters
----------
control_1 : :obj:`tuple`
tuple with (`state`, `exposure`) for peptides to use for normalization (FD control)
control_0 : :obj:`tuple`, optional
tuple with (`state`, `exposure`) for peptides to use for zeroing uptake values (ND control)
"""
try:
fd_df = self.get_data(*control_1)[['start', 'end', 'uptake']].set_index(['start', 'end'], verify_integrity=True)
except ValueError as e:
raise ValueError("FD control has duplicate entries") from e
if fd_df.size == 0:
raise ValueError(f'No matching peptides with state {control_1[0]} and exposure {control_1[1]}')
try:
if control_0 is None:
nd_df = self.get_data(*control_1).copy()[['start', 'end', 'uptake']].set_index(['start', 'end'], verify_integrity=True)
nd_df['uptake'] = 0
else:
nd_df = self.get_data(*control_0)[['start', 'end', 'uptake']].set_index(['start', 'end'], verify_integrity=True)
if nd_df.size == 0:
raise ValueError(f'No matching peptides with state {control_0[0]} and exposure {control_0[1]}')
except ValueError as e:
raise ValueError("ND control has duplicate entries") from e
self.data.set_index(['start', 'end'], append=True, inplace=True)
self.data.reset_index(level=0, inplace=True)
self.data['rfu'] = (self.data['uptake'] - nd_df['uptake']) / (fd_df['uptake'] - nd_df['uptake'])
self.data['uptake_corrected'] = self.data['rfu'] * self.data['ex_residues']
self.data = self.data.set_index('peptide_index', append=True).reset_index(level=[0, 1])
def select(self, **kwargs):
"""
Select data based on column values.
Parameters
----------
kwargs: :obj:`dict`
Column name, value pairs to select
Returns
-------
output_data : :class:`~pandas.DataFrame`
DataFrame with selected peptides
"""
masks = [self.data[k] == v for k, v in kwargs.items()]
m = np.logical_and.reduce(masks)
return self.data[m]
def get_data(self, state, exposure):
"""
Get all peptides matching `state` and `exposure`.
Parameters
----------
state : :obj:`str`
Measurement state
exposure : :obj:`float`
Measurement exposure time
Returns
-------
output_data : :class:`~pandas.DataFrame`
DataFrame with selected peptides
"""
return self.select(state=state, exposure=exposure)
@property
def states(self):
""":class:`~numpy.ndarray` Array with unique states"""
return np.unique(self.data['state'])
@property
def exposures(self):
""":class:`~numpy.ndarray` Array with unique exposures"""
return np.unique(self.data['exposure'])
class Coverage(object):
"""
Object describing layout and coverage of peptides and generating the corresponding matrices. Peptides should all
belong to the same state and have the same exposure time.
Parameters
----------
data : :class:`~pandas.DataFrame`
DataFrame with input peptides
c_term : :obj:`int`
        Residue index number of the C-terminal residue (where the first residue has index number 1)
    n_term : :obj:`int`
        Residue index of the N-terminal residue. Default value is 1; can be negative to accommodate N-terminal
        purification tags
sequence : :obj:`str`
Amino acid sequence of the protein in one-letter FASTA encoding. Optional, if not specified the amino acid sequence
from the peptide data is used to (partially) reconstruct the sequence. Supplied amino acid sequence must be
compatible with sequence information in the peptides.
Attributes
----------
X : :class:`~numpy.ndarray`
        N x M matrix where N is the number of peptides and M is the number of residues spanned by the peptides.
        Values are 1 where there is coverage, 0 elsewhere.
    Z : :class:`~numpy.ndarray`
        N x M matrix where N is the number of peptides and M is the number of residues spanned by the peptides.
        Values are 1/(ex_residues) for covered residues that exchange,
#todo account for prolines: so that rows sum to 1 is currently not true
"""
def __init__(self, data, c_term=0, n_term=1, sequence=''):
assert len(np.unique(data['exposure'])) == 1, 'Exposure entries are not unique'
assert len(np.unique(data['state'])) == 1, 'State entries are not unique'
self.data = data.sort_values(['start', 'end'], axis=0)
start = self.data['_start'].min()
end = self.data['_end'].max()
if n_term:
start = min(start, n_term)
if sequence and not c_term:
c_term = len(sequence) + n_term - 1
if c_term:
if c_term + 1 < end:
raise ValueError("HDX data extends beyond c_term number, check 'sequence' or 'c_term'")
end = c_term + 1 # c_term is inclusive, therefore plus one
r_number = pd.RangeIndex(start, end, name='r_number') # r_number spanning the full protein range, not just the covered range
# Full sequence
_seq = pd.Series(index=r_number, dtype='U').fillna('X') # Full sequence
# Sequence with lower case letters for no coverage due to n_terminal residues or prolines
seq = pd.Series(index=r_number, dtype='U').fillna('X')
for idx in self.data.index[::-1]:
start, end = self.data.loc[idx, '_start'], self.data.loc[idx, '_end']
_seq.loc[start: end-1] = list(self.data.loc[idx, '_sequence'])
seq.loc[start: end-1] = list(self.data.loc[idx, 'sequence'])# = list(d['sequence'])
if sequence:
for r, s1, s2 in zip(r_number, sequence, _seq):
if s2 != 'X' and s1 != s2:
raise ValueError(
f"Mismatch in supplied sequence and peptides sequence at residue {r}, expected '{s2}', got '{s1}'")
if len(sequence) != len(_seq):
raise ValueError("Invalid length of supplied sequence. Please check 'n_term' and 'c_term' parameters")
_seq = list(sequence)
        #todo check if this is always correctly determined (n terminal residues etc.)
exchanges = [s.isupper() and (s != 'X') for s in seq] # Boolean array True if residue exchanges, full length
coverage = seq != 'X' # Boolean array for coverage
protein_df = pd.DataFrame({'sequence': _seq, 'coverage': coverage, 'exchanges': exchanges}, index=r_number)
# Inclusive, exclusive interval of peptides coverage across the whole protein
self.interval = (np.min(self.data['start']), np.max(self.data['end']))
self.protein = Protein(protein_df, index='r_number')
# matrix dimensions N_peptides N_residues, dtype for TF compatibility
_exchanges = self['exchanges'] # Array only on covered part
self.X = np.zeros((len(self.data), self.interval[1] - self.interval[0]), dtype=int)
self.Z = np.zeros_like(self.X, dtype=float)
for row, idx in enumerate(self.data.index):
start, end = self.data.loc[idx, 'start'], self.data.loc[idx, 'end']
i0, i1 = self.r_number.get_loc(start), self.r_number.get_loc(end - 1)
#i0, i1 = np.searchsorted(self.r_number, (entry['start'], entry['end']))
self.X[row][i0:i1+1] = 1
self.Z[row][i0:i1+1] = _exchanges[i0:i1+1]
self.Z = self.Z / self.data['ex_residues'].to_numpy()[:, np.newaxis]
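    # Illustrative sketch (added): for two peptides covering residues 1-4 and 3-6 (covered interval
    # spanning residues 1-6), X has shape (2, 6) with rows [1, 1, 1, 1, 0, 0] and [0, 0, 1, 1, 1, 1].
    # Z additionally zeroes residues that do not exchange (e.g. prolines) and divides each row by
    # that peptide's number of exchanging residues (ex_residues).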
def __len__(self):
return len(self.data)
def __getitem__(self, item):
pd_series = self.protein[item]
return self.apply_interval(pd_series)
def apply_interval(self, array_or_series):
"""
        Given a NumPy array or pandas Series with a length equal to the full protein, returns the section of the array
        corresponding to the covered region. The returned series length is equal to the number of columns in the X matrix.

        Parameters
        ----------
        array_or_series : :class:`~numpy.ndarray` or :class:`~pandas.Series`
            Array or series with length equal to the full protein.
"""
if isinstance(array_or_series, np.ndarray):
series = pd.Series(array_or_series, index=self.protein.df.index)
assert len(array_or_series) == len(self.protein)
else:
series = array_or_series
# - 1 because interval is inclusive, exclusive and .loc slices inclusive, inclusive
covered_slice = series.loc[self.interval[0]:self.interval[1] - 1]
return covered_slice
@property
def percent_coverage(self):
""":obj:`float`: Percentage of residues covered by peptides"""
return 100*np.mean(self.protein['coverage'])
@property
def redundancy(self):
""":obj:`float`: Average redundancy of peptides in regions with at least 1 peptide"""
x_coverage = self.X[:, self['coverage']]
return np.mean(np.sum(x_coverage, axis=0))
@property
def Np(self):
""":obj:`int`: Number of peptides."""
return self.X.shape[0]
@property
def Nr(self):
""":obj:`int`: Total number of residues spanned by the peptides."""
return self.X.shape[1]
@property
def r_number(self):
""":class:`~pandas.RangeIndex`: Pandas index numbers corresponding to the part of the protein covered by peptides"""
return pd.RangeIndex(self.interval[0], self.interval[1], name='r_number')
@property
def index(self):
""":class:`~pandas.RangeIndex`: Pandas index numbers corresponding to the part of the protein covered by peptides"""
return self.r_number
@property
def block_length(self):
""":class:`~numpy.ndarary`: Lengths of unique blocks of residues in the peptides map,
along the `r_number` axis"""
# indices are start and stop values of blocks
indices = np.sort(np.concatenate([self.data['start'], self.data['end']]))
        # indices of insertion into the r_number vector give us blocks, taking prolines into account.
diffs = np.diff(np.searchsorted(self.r_number, indices))
block_length = diffs[diffs != 0]
return block_length
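    # Note (added comment): the block boundaries are the union of all peptide start/end positions;
    # boundaries that collapse onto the same r_number position (diff == 0) are dropped, so the
    # remaining differences are the residue lengths of the independent blocks.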
@property
def X_norm(self):
""":class:`~numpy.ndarray`: `X` coefficient matrix normalized column wise."""
return self.X / np.sum(self.X, axis=0)[np.newaxis, :]
@property
def Z_norm(self):
""":class:`~numpy.ndarray`: `Z` coefficient matrix normalized column wise."""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
z_norm = self.Z / np.sum(self.Z, axis=0)[np.newaxis, :]
return z_norm
def get_sections(self, gap_size=-1):
"""Get the intervals of independent sections of coverage.
Intervals are inclusive, exclusive.
        Gaps are defined with `gap_size`; adjacent peptides with distances bigger than this value are considered not to
        overlap. Set `gap_size` to -1 to treat touching peptides as belonging to the same section.
Parameters
----------
gap_size : :obj:`int`
The size which defines a gap
"""
intervals = [(s, e) for s, e in zip(self.data['start'], self.data['end'])]
sections = reduce_inter(intervals, gap_size=gap_size)
return sections
def __eq__(self, other):
"""Coverage objects are considered equal if both objects fully match between their start, end and sequence fields"""
assert isinstance(other, Coverage), "Other must be an instance of Coverage"
return len(self.data) == len(other.data) and np.all(self.data['start'] == other.data['start']) and \
np.all(self.data['end'] == other.data['end']) and np.all(self.data['sequence'] == other.data['sequence'])
class HDXMeasurement(object):
"""
Main HDX data object. This object has peptide data of a single state but with multiple timepoints.
    Timepoint data is split into :class:`~pyhdx.models.HDXTimepoint` objects, one for each timepoint
Supplied data is made 'uniform' such that all timepoints have the same peptides
Parameters
----------
data : :class:`~pandas.DataFrame`
Pandas dataframe with all peptides belonging to a single state.
**metadata
Dictionary of optional metadata. By default, holds the `temperature` and `pH` parameters.
Attributes
----------
data: :class:`~pandas.DataFrame`
Pandas dataframe with all peptides
state : :obj:`str`
State of the HDX measurement
timepoints : :class:`~numpy.ndarray`
Array with exposure times (sorted)
peptides : :obj:`list`
        List of :class:`~pyhdx.models.HDXTimepoint`, one list element per timepoint.
coverage : :class:`~pyhdx.models.Coverage`
Coverage object describing peptide layout.
"""
def __init__(self, data, **metadata):
self.metadata = metadata
assert len(data['state'].unique()) == 1
self.state = str(data['state'].iloc[0])
self.timepoints = np.sort(np.unique(data['exposure']))
# Obtain the intersection of peptides per timepoint
data_list = [(data[data['exposure'] == exposure]).set_index(['_start', '_end']) for exposure in self.timepoints]
index_intersection = reduce(pd.Index.intersection, [d.index for d in data_list])
intersected_data = [df.loc[index_intersection].reset_index() for df in data_list]
cov_kwargs = {kwarg: metadata.get(kwarg, default) for kwarg, default in zip(['c_term', 'n_term', 'sequence'], [0, 1, ''])}
self.peptides = [HDXTimepoint(df, **cov_kwargs) for df in intersected_data]
# Create coverage object from the first time point (as all are now equal)
self.coverage = Coverage(intersected_data[0], **cov_kwargs)
if self.temperature and self.pH:
self.coverage.protein.set_k_int(self.temperature, self.pH)
self.data = pd.concat(intersected_data, axis=0, ignore_index=True)
self.data.index.name = 'peptide_index'
def __str__(self):
"""
String representation of HDX measurement object.
Returns
-------
        s : :obj:`str`
Multiline string describing this HDX Measurement object
"""
timepoints = ', '.join([f'{t:.2f}' for t in self.timepoints])
s = f"""
HDX Measurement: {self.name}
Number of peptides: {self.Np}
Number of residues: {self.Nr} ({self.coverage.interval[0]} - {self.coverage.interval[1]})
Number of timepoints: {self.Nt}
Timepoints: {timepoints} seconds
Coverage Percentage: {self.coverage.percent_coverage:.2f}
Average redundancy: {self.coverage.redundancy:.2f}
Temperature: {self.temperature} K
pH: {self.pH}
"""
return textwrap.dedent(s.lstrip('\n'))
def _repr_markdown_(self):
s = str(self)
s = s.replace('\n', '<br>')
return s
@property
def name(self):
""":obj:`str`: HDX Measurement name"""
return self.metadata.get('name', self.state)
@property
def temperature(self):
""":obj:`float`: Temperature of the H/D exchagne reaction (K)."""
return self.metadata.get('temperature', None)
@property
def pH(self):
"""pH of the H/D exchange reaction."""
return self.metadata.get('pH', None)
@property
def Np(self):
""":obj:`int`: Number of peptides."""
return self.coverage.Np
@property
def Nr(self):
""":obj:`int`: Total number of residues spanned by the peptides."""
return self.coverage.Nr
@property
def Nt(self):
""":obj:`int`: Number of timepoints."""
return len(self.timepoints)
def __len__(self):
import warnings
warnings.warn('Use hdxm.Nt instead', DeprecationWarning)
return len(self.timepoints)
def __iter__(self):
return self.peptides.__iter__()
def __getitem__(self, item):
return self.peptides.__getitem__(item)
@property
def rfu_residues(self):
""":class:`~pandas.DataFrame`: Relative fractional uptake per residue. Shape Nr x Nt"""
df = pd.concat([v.rfu_residues for v in self], keys=self.timepoints, axis=1)
df.columns.name = 'exposure'
return df
@property
def rfu_peptides(self):
""":class:`~pandas.DataFrame`: Relative fractional uptake per peptide. Shape Np x Nt"""
df = pd.concat([v.rfu_peptides for v in self], keys=self.timepoints, axis=1)
df.columns.name = 'exposure'
return df
@property
def d_exp(self):
""":class:`~pandas.DataFrame`: D-uptake values (corrected). Shape Np x Nt"""
df = | pd.concat([v.d_exp for v in self], keys=self.timepoints, axis=1) | pandas.concat |
__author__ = 'marcopereira'
import os
from datetime import date
import pandas as pd
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
WORKING_DIR = os.path.join(BASE_DIR, 'workspace')
trim_start = date(2005,1,10)
trim_end = date(2006,1,10)
start = date(2005, 3, 30)
referenceDate = date(2005, 3, 30) # 6 months after trim_start
simNumber = 5
R = 0.4
inArrears = True
freq = '3M'
periods = ['5Y']
observationdate = minDay = date(2005,1,10) # Observation Date
# Vasicek initial guesses
x0Vas = []
x0Vas.append([0.000377701101971, 0.06807420742631265, 0.020205128906558, 0.002073084987793])
x0Vas.append([0.000279919484103, 0.09181159494219767, 0.020199490652279, 0.002074503244439])
x0Vas.append([0.000279098482384, 0.37478438638015319, -0.043475095829618, 0.005391997288885])
x0Vas.append([0.000241182283994, 0.37624139076990623, -0.039701685607549, 0.007109990514207])
x0Vas = | pd.DataFrame(x0Vas) | pandas.DataFrame |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
" Load the data from 2 csv files
"
" Args:
" messages_filepath: file path of the csv file containing the messages
" categories_filepath: file path of the csv file containing the categories
"
" Returns:
" a dataframe
"
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = | pd.merge(messages, categories, on='id') | pandas.merge |
import pandas as pd
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import seaborn as sns
import matplotlib.pyplot as plt
# Concatenate review dataframes
reviews0 = | pd.read_csv('Analytics/reviews/reviews0.csv') | pandas.read_csv |
"""
Entry points for compass
"""
from __future__ import absolute_import, print_function, division
import argparse
import os
import multiprocessing
import numpy as np
import pandas as pd
import sys
import subprocess as sp
import logging
import datetime
import json
import gzip
from functools import partial
from tqdm import tqdm
from six import string_types
from math import ceil
from .compass import cache
from ._version import __version__
from .compass.torque import submitCompassTorque
from .compass.algorithm import singleSampleCompass, maximize_reaction_range, maximize_metab_range, initialize_cplex_problem
from .compass.algorithm_t import runCompassParallelTransposed
from .compass.microclustering import microcluster, pool_matrix_cols, unpool_columns
from .models import init_model
from .compass.penalties import eval_reaction_penalties, compute_knn
from . import globals
from . import utils
def parseArgs():
"""Defines the command-line arguments and parses the Compass call
Returns
-------
argparse.Namespace
"""
parser = argparse.ArgumentParser(
prog="Compass",
description="Compass version "+str(__version__)+
". Metabolic Modeling for Single Cells. "
"For more details on usage refer to the documentation: https://yoseflab.github.io/Compass/")
parser.add_argument("--data", help="Gene expression matrix."
" Should be a tsv file with one row per gene and one column per sample",
metavar="FILE")
parser.add_argument("--data-mtx", help="Gene expression matrix."
" Should be a matrix market (mtx) formatted gene file. Must be followed by a tsv file with row names corresponding to genes and optionally that can be followed by a tsv file with sample names. ",
nargs="+",
metavar="FILE")
parser.add_argument("--model", help="Metabolic Model to Use."
" Currently supporting: RECON1_mat, RECON2_mat, or RECON2.2",
default="RECON2_mat",
choices=["RECON1_mat", "RECON2_mat", "RECON2.2", "RECON3"],
metavar="MODEL")
parser.add_argument("--species",
help="Species to use to match genes to model."
" Currently supporting: homo_sapiens or mus_musculus",
choices=["homo_sapiens", "mus_musculus"],
metavar="SPECIES",
default="homo_sapiens")
parser.add_argument("--media", help="Which media to simulate",
metavar="MEDIA")
parser.add_argument("--output-dir", help="Where to store outputs",
default='.',
metavar="DIR")
parser.add_argument("--temp-dir", help="Where to store temporary files",
default='<output-dir>/_tmp',
metavar="DIR")
parser.add_argument("--torque-queue", help="Submit to a Torque queue",
metavar="QUEUE")
parser.add_argument("--num-processes",
help="Limit to <N> Processes. "
"Ignored when submitting job onto a queue",
type=int,
metavar="N")
parser.add_argument("--lambda",
help="Smoothing factor for single-cell data. Should be"
" set between 0 and 1",
type=float,
default=0,
metavar="F")
parser.add_argument("--single-sample",
help=argparse.SUPPRESS,
type=int,
metavar="N")
    #Arguments to help with scheduler scripts
parser.add_argument("--transposed",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--sample-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--reaction-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--metabolite-range",
help=argparse.SUPPRESS,
nargs=2)
parser.add_argument("--generate-cache",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--test-mode",
help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("--num-threads",
help="Number of threads to use per sample",
type=int, default=1,
metavar="N")
parser.add_argument(
"--and-function",
help="Which function used to aggregate AND associations",
choices=["min", "median", "mean"],
metavar="FXN",
default="mean")
parser.add_argument(
"--select-reactions",
help="Compute compass scores only for the reactions listed in the given file. FILE is expected to be textual, with one line per reaction (undirected, namely adding the suffix \"_pos\" or \"_neg\" to a line will create a valid directed reaction id). Unrecognized reactions in FILE are ignored.",
required=False,
metavar="FILE")
parser.add_argument(
"--select-subsystems",
help="Compute compass scores only for the subsystems listed in the given file. FILE is expected to be textual, with one line per subsystem. Unrecognized subsystems in FILE are ignored.",
required=False,
metavar="FILE")
parser.add_argument("--glucose", type=float,
required=False, help=argparse.SUPPRESS)
# Hidden argument. Used for batch jobs
parser.add_argument("--collect", action="store_true",
help=argparse.SUPPRESS)
# Also used for batch jobs
parser.add_argument("--config-file", help=argparse.SUPPRESS)
parser.add_argument("--num-neighbors",
help="Either effective number of neighbors for "
"gaussian penalty diffusion or exact number of "
"neighbors for KNN penalty diffusion",
default=30,
type=int,
metavar="N")
parser.add_argument("--symmetric-kernel", action="store_true",
help="Use symmetric TSNE kernel (slower)")
parser.add_argument("--input-weights",
help="File with input sample to sample weights",
required=False, metavar="FILE")
parser.add_argument("--penalty-diffusion",
help="Mode to use to share reaction penalty "
"values between single cells",
choices=["gaussian", "knn"],
metavar="MODE",
default="gaussian")
parser.add_argument("--no-reactions", action="store_true",
help="Skip computing scores for reactions")
parser.add_argument("--calc-metabolites", action="store_true",
help="Compute scores for metabolite "
"uptake/secretion")
parser.add_argument("--precache", action="store_true",
help="Preprocesses the model to find "
" maximum fluxes")
parser.add_argument("--input-knn", help="File with a precomputed knn graph for the samples. "
"File must be a tsv with one row per sample and (k+1) columns. The first column should be sample names, "
"and the next k columns should be indices of the k nearest neighbors (by their order in column 1)",
default=None, metavar="FILE")
parser.add_argument("--output-knn", help="File to save kNN of data to. "
"File will be a tsv with one row per sample and (k+1) columns. The first column will be sample names, "
"and the next k columns will be indices of the k nearest neighbors (by their order in column 1)",
default=None, metavar="FILE")
parser.add_argument("--latent-space", help="File with latent space reprsentation of samples for knn clustering. "
"File must a tsv with one row per sample and one column per dimension of the latent space.",
default=None, metavar="FILE")
parser.add_argument("--only-penalties", help="Flag for Compass to only compute the reaction penalties for the dataset.",
action="store_true", default=None)
parser.add_argument("--example-inputs", help="Flag for Compass to list the directory where example inputs can be found.",
action="store_true", default=None)
parser.add_argument("--microcluster-size",
type=int, metavar="C", default=None,
help="Target number of cells per microcluster")
#Hidden argument which tracks more detailed information on runtimes
parser.add_argument("--detailed-perf", action="store_true",
help=argparse.SUPPRESS)
#Hidden argument for testing purposes.
parser.add_argument("--penalties-file",
help=argparse.SUPPRESS,
default='')
#Hidden argument to choose the algorithm CPLEX uses. Barrier generally best choice.
#See - https://www.ibm.com/support/knowledgecenter/en/SS9UKU_12.10.0/com.ibm.cplex.zos.help/CPLEX/Parameters/topics/LPMETHOD.html
parser.add_argument("--lpmethod",
help=argparse.SUPPRESS,
default=4,
type=int)
#Hidden argument to choose the setting for Cplex's advanced basis setting. Generally 2 is the best, but for ease of testing I've added it here.
parser.add_argument("--advance",
help=argparse.SUPPRESS,
default=2,
type=int)
#Hidden argument to save argmaxes in the temp directory
parser.add_argument("--save-argmaxes", action="store_true",
help=argparse.SUPPRESS)
#Argument to output the list of needed genes to a file
parser.add_argument("--list-genes", default=None, metavar="FILE",
help="File to output a list of metabolic genes needed for selected metabolic model.")
args = parser.parse_args()
args = vars(args) # Convert to a Dictionary
load_config(args)
if args['data'] and args['data_mtx']:
parser.error("--data and --data-mtx cannot be used at the same time. Select only one input per run.")
if not args['data'] and not args['data_mtx']:
if not args['precache'] and not args['list_genes'] and not args['example_inputs']:
parser.error("--data or --data-mtx required unless --precache, --list-genes, or --example-inputs option selected")
else:
if args['data_mtx']:
args['data'] = args['data_mtx']
else:
if type(args['data']) != list:
args['data'] = [args['data']]
args['data'] = [os.path.abspath(p) for p in args['data']]
if len(args['data']) == 2:
args['data'].append(None)
if args['input_weights']:
args['input_weights'] = os.path.abspath(args['input_weights'])
if args['select_reactions']:
args['select_reactions'] = os.path.abspath(args['select_reactions'])
if args['select_subsystems']:
args['select_subsystems'] = os.path.abspath(args['select_subsystems'])
if args['temp_dir'] == "<output-dir>/_tmp":
args['temp_dir'] = os.path.join(args['output_dir'], '_tmp')
args['output_dir'] = os.path.abspath(args['output_dir'])
args['temp_dir'] = os.path.abspath(args['temp_dir'])
if args['input_knn']:
args['input_knn'] = os.path.abspath(args['input_knn'])
if args['output_knn']:
args['output_knn'] = os.path.abspath(args['output_knn'])
if args['latent_space']:
args['latent_space'] = os.path.abspath(args['latent_space'])
if args['lambda'] < 0 or args['lambda'] > 1:
parser.error(
"'lambda' parameter cannot be less than 0 or greater than 1"
)
if args['generate_cache'] and \
(args['no_reactions'] or not args['calc_metabolites']):
parser.error(
"--generate-cache cannot be run with --no-reactions or "
"without --calc-metabolites" #Not sure about why this needs metabolites to calculated
)
if args['reaction_range']:
args['reaction_range'] = [int(x) for x in args['reaction_range']]
if args['metabolite_range']:
args['metabolite_range'] = [int(x) for x in args['metabolite_range']]
if args['sample_range']:
args['sample_range'] = [int(x) for x in args['sample_range']]
return args
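# Illustrative example (not part of the original source): a typical invocation
# using the arguments defined above might look like
#   compass --data expression.tsv --model RECON2_mat --species homo_sapiens \
#           --output-dir ./compass_output --num-processes 8
# where expression.tsv is a genes-by-samples tsv matrix as described in the
# --data help text above.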
def entry():
"""Entry point for the compass command-line script
"""
start_time = datetime.datetime.now()
args = parseArgs()
if args['data']:
if not os.path.isdir(args['output_dir']):
os.makedirs(args['output_dir'])
if not os.path.isdir(args['temp_dir']) and args['temp_dir'] != '/dev/null':
os.makedirs(args['temp_dir'])
globals.init_logger(args['output_dir'])
# Log some things for debugging/record
logger = logging.getLogger('compass')
logger.debug("Compass version: " + __version__)
try:
commit = sp.check_output(
["git", '--git-dir', globals.GIT_DIR, "rev-parse", "--short",
"HEAD"],
stderr=open(os.devnull, 'w')
)
logger.debug("Git commit: " + commit.decode())
except sp.CalledProcessError:
logger.debug("Git commit: Not in Git repo")
logger.debug("Python Version:")
logger.debug(sys.version)
logger.debug("Python prefix: " + sys.prefix)
logger.debug("Numpy version: " + np.__version__)
logger.debug("Pandas version: " + pd.__version__)
logger.debug("Supplied Arguments: ")
for (key, val) in args.items():
logger.debug(" {}: {}".format(key, val))
logger.debug("\nCOMPASS Started: {}".format(start_time))
# Parse arguments and decide what course of action to take
if args['microcluster_size'] and args['data']:
logger.info("Partitioning dataset into "+str(args['microcluster_size'])+" microclusters")
data = utils.read_data(args['data'])
pools = microcluster(data, cellsPerPartition = args['microcluster_size'],
n_jobs = args['num_processes'], latentSpace=args['latent_space'])
pooled_data = pool_matrix_cols(data, pools)
pooled_data_file = os.path.join(args['temp_dir'], "pooled_data.tsv")
pooled_data.to_csv(pooled_data_file, sep="\t")
pools_file = os.path.join(args['temp_dir'], "pools.json")
with open(pools_file, 'w') as fout:
json.dump(pools, fout)
fout.close()
args['orig_data'] = args['data']
args['data'] = [pooled_data_file]
args['pools_file'] = pools_file
if args['latent_space']:
logger.info("Partitioning latent space into "+str(args['microcluster_size'])+" microclusters")
latent= | pd.read_csv(args['latent_space'], sep='\t', index_col=0) | pandas.read_csv |
#Gathers domains that DHS site has membership in
#mm10_domains.csv generated by modification of Genome liftover of mm9_domains from domain paper in domain_id_assignment.py
#DHS_intergenic_#.csv generated by UNKNOWN
#Exports DHS_#_with_domain.csv
import pandas as pd
import matplotlib.pyplot as plt
import csv
printed = False
def duplicates(list, item):
"""Returns index locations of item in list"""
return [i for i, x in enumerate(list) if x == item]
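# Example: duplicates(['chr1', 'chr2', 'chr1'], 'chr1') returns [0, 2]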
chromosomes = []
start = []
end = []
id = []
chromosome_locations = {}
#load domain data
domain_data = pd.read_csv("data/mm10_data/domains/domains.csv", header=None, index_col=False)
# #Get rid of header row in csv
# domain_data = domain_data.iloc[1:] -- header does not exist in mm10 file
for row in domain_data[0]:
chromosomes.append(row)
for row in domain_data[1]:
start.append(row)
for row in domain_data[2]:
end.append(row)
for x in range(len(chromosomes)):
id.append(str(chromosomes[x]) + ":" + str(start[x]) + "-" + str(end[x]))
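# Each id string has the form "<chromosome>:<start>-<end>", e.g. "chr1:3000000-3400000"
# (the coordinates in this example are illustrative only).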
print("Domains loaded")
uq_chrom = set(chromosomes)
for item in uq_chrom:
chromosome_locations[item] = duplicates(chromosomes, item)
domain_groups = []
for x in range(len(start)):
domain_groups.append([chromosomes[x],start[x], end[x], id[x]])
domain_groups_1 = {}
domain_groups_2 = {}
domain_groups_4 = {}
domain_groups_8 = {}
domain_dhs_count_1 = []
domain_dhs_count_2 = []
domain_dhs_count_4 = []
domain_dhs_count_8 = []
files = ['1','2','4','8']
for file in files:
print(file + " cell started")
DHS_data = []
DHS_chromosomes = []
DHS_start = []
DHS_end = []
DHS_type = []
mismatched_domains = []
domain_dhs_count = None
domains = []
if file == '1':
domain_group = domain_groups_1
domain_dhs_count = domain_dhs_count_1
if file == '2':
domain_group = domain_groups_2
domain_dhs_count = domain_dhs_count_2
if file == '4':
domain_group = domain_groups_4
domain_dhs_count = domain_dhs_count_4
if file == '8':
domain_group = domain_groups_8
domain_dhs_count = domain_dhs_count_8
#load DHS data
#TODO: Find where this is before rerunning
csv_file = "data/mm10_data/DHSs/DHSs_intergenic_" + file + ".csv"
DHS_data_df = | pd.read_csv(csv_file, header=None, index_col=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) as input. empty - makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # all values are masked (NaN), so the frame should compare elementwise-unequal to itself
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
                return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = | DataFrame(gen) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# Links:
# - [imports](#imports)
# - [Pytorch Lightning](#pytorch_lightning)
# - [Bouts](#bouts)
# - [Train](#train)
# - [Plot](#plot)
# # Imports <a id='imports'></a>
# In[771]:
import pandas as pd
import ast
import os
from glob import glob
import numpy as np
import scipy
from sklearn import metrics
from sklearn import dummy
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import KFold
from sklearn import linear_model
from sklearn import ensemble
from sklearn.model_selection import cross_val_score
from sklearn.dummy import DummyRegressor, DummyClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, Sampler
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from skorch import NeuralNet, NeuralNetRegressor
from hypnospy import Wearable, Experiment, Diary
from hypnospy.data import MESAPreProcessing
from hypnospy.analysis import NonWearingDetector, SleepBoudaryDetector, Viewer, PhysicalActivity, Validator, CircadianAnalysis
from hypnospy.analysis import SleepMetrics, SleepWakeAnalysis
from HypnosPy.healthyForce.ML_misc import get_dataframes
class autoencoder(nn.Module):
def __init__(self):
super(autoencoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(24*120, 80),
nn.ReLU(True),
nn.Linear(80, 64),
nn.ReLU(True), nn.Linear(64, 12), nn.ReLU(True), nn.Linear(12, 3))
self.decoder = nn.Sequential(
nn.Linear(3, 12),
nn.ReLU(True),
nn.Linear(12, 64),
nn.ReLU(True),
nn.Linear(64, 80),
nn.ReLU(True), nn.Linear(80, 120*24), nn.Tanh())
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
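# Illustrative sketch (not in the original notebook): the autoencoder above expects
# flattened inputs of size 24*120 = 2880, e.g.
#   model = autoencoder()
#   x = torch.randn(8, 24 * 120)   # batch of 8 flattened activity profiles
#   recon = model(x)               # shape (8, 2880), values in (-1, 1) from the final Tanh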
# ===== Encoders for 2280-D ========#
class LinearEncoder2880(nn.Module):
def __init__(self):
super().__init__()
self.encode = nn.Sequential(
nn.Linear(2880, 1440), nn.ReLU(),
nn.Linear(1440, 360), nn.ReLU(),
nn.Linear(360, 90), nn.ReLU(),
nn.Linear(90, 30), nn.ReLU(),
)
def forward(self, X):
encoded = self.encode(X)
return encoded
class LinearDecoder2880(nn.Module):
def __init__(self):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(30, 90), nn.ReLU(),
nn.Linear(90, 360), nn.ReLU(),
nn.Linear(360, 1440), nn.ReLU(),
nn.Linear(1440, 2880),
)
def forward(self, X):
decoded = self.decode(X)
return decoded
class ConvEncoder(nn.Module):
def __init__(self):
super().__init__()
# Expected input to CNN is (Batch, Channels, L)
# ()
self.encode = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=4, kernel_size=20, stride=10, padding=0), nn.ReLU(),
nn.Conv1d(in_channels=4, out_channels=8, kernel_size=6, stride=4, padding=0), nn.ReLU(),
nn.Dropout(0.2),
nn.Flatten(),
nn.Linear(568, 256), nn.ReLU(),
nn.Linear(256, 128), nn.ReLU(),
nn.Linear(128, 32), nn.ReLU(),
)
def forward(self, X):
X = X.unsqueeze(1)
encoded = self.encode(X)
return encoded
class ConvDecoder(nn.Module):
def __init__(self):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(32, 128), nn.ReLU(),
nn.Linear(128, 256), nn.ReLU(),
nn.Linear(256, 568), nn.ReLU(),
nn.Unflatten(1, torch.Size([8, 71])),
nn.ConvTranspose1d(in_channels=8, out_channels=4, kernel_size=6, stride=4, padding=0),
nn.ReLU(),
nn.ConvTranspose1d(in_channels=4, out_channels=1, kernel_size=30, stride=10, padding=0),
nn.ReLU()
)
def forward(self, X):
decoded = self.decode(X)
return decoded.squeeze(1)
# ===== Encoders for 24-D ========#
class LinearEncoder24(nn.Module):
def __init__(self):
super().__init__()
self.encode = nn.Sequential(
nn.Linear(24, 16), nn.ReLU(),
nn.Linear(16, 8), nn.ReLU(),
)
def forward(self, X):
encoded = self.encode(X)
return encoded
class LinearDecoder24(nn.Module):
def __init__(self):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(8, 16), nn.ReLU(),
nn.Linear(16, 24)
)
def forward(self, X):
decoded = self.decode(X)
return decoded
class AutoEncoder(nn.Module):
def __init__(self, conv=True, input_dim=2880):
super().__init__()
if conv:
print("Using ConvEncoder")
self.encoder = ConvEncoder()
self.decoder = ConvDecoder()
else:
if input_dim == 2880:
print("Using LinearEncoder")
self.encoder = LinearEncoder2880()
self.decoder = LinearDecoder2880()
else:
self.encoder = LinearEncoder24()
self.decoder = LinearDecoder24()
def forward(self, X):
encoded = self.encoder(X)
decoded = self.decoder(encoded)
return decoded, encoded
class AutoEncoderNet(NeuralNetRegressor):
def get_loss(self, y_pred, y_true, *args, **kwargs):
decoded, encoded = y_pred # <- unpack the tuple that was returned by `forward`
loss_reconstruction = super().get_loss(decoded, y_true, *args, **kwargs)
loss_l1 = 1e-3 * torch.abs(encoded).sum()
return loss_reconstruction + loss_l1
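# Note (added for clarity): the total loss used by AutoEncoderNet is
#   L = MSE(decoded, X_true) + 1e-3 * sum(|encoded|)
# i.e. the usual reconstruction error plus an L1 sparsity penalty on the latent code.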
# In[772]:
import matplotlib.pyplot as plt
def my_plot(epochs, loss):
plt.plot(epochs, loss)
def train(num_epochs, optimizer, criterion, model):
    # Filled-in sketch of a standard PyTorch training loop; the original cell
    # only contained "# rest of the code" placeholders. `trainloader` is
    # assumed to be a DataLoader defined elsewhere that yields (images, labels).
    loss_vals = []
    for epoch in range(num_epochs):
        epoch_loss = []
        for i, (images, labels) in enumerate(trainloader):
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())
        loss_vals.append(sum(epoch_loss) / len(epoch_loss))
    # plotting
    my_plot(np.linspace(1, num_epochs, num_epochs).astype(int), loss_vals)
my_plot([1, 2, 3, 4, 5], [100, 90, 60, 30, 10])
# In[773]:
# df = pd.read_csv("acm_health_sleep_data-main/processed_hchs/HCHS_per_hour.csv", converters={"raw_pa": lambda x: np.fromstring(x, sep=',')})
df = pd.read_csv("acm_health_sleep_data-main/processed_mesa/MESA_per_hour.csv", converters={"raw_pa": lambda x: np.fromstring(x, sep=',')})
#df = pd.read_csv("mesa/MESA_per_hour.csv", converters={"raw_pa": lambda x: np.fromstring(x, sep=',')})
rawpa = []
for m in range(0, 60):
for s in (0, 30):
rawpa.append("hyp_act_x_%s_%s" % (str(m).zfill(2), str(s).zfill(2)))
df = df[["pid", "ml_sequence", "hyp_time_col", *rawpa]].fillna(0.0).drop_duplicates()
df = df.pivot(["pid", "ml_sequence"], columns=["hyp_time_col"])
df.columns = df.columns.swaplevel(0, 1)
df.sort_index(axis=1, level=0, inplace=True)
### DF hours
dfs = []
for hour in range(0,24):
dfs.append(df[hour].mean(axis=1))
df_hour = pd.concat(dfs, axis=1)
# -
# In[43]:
net = AutoEncoderNet(
AutoEncoder,
max_epochs=20,
lr=0.0000001,
#criterion=nn.MSELoss(),
module__conv=False,
module__input_dim=24,
iterator_train__batch_size=16,
# Shuffle training data on each epoch
iterator_train__shuffle=True,
#device="cuda",
)
#X = df.fillna(0.0).values.astype(np.float32)
df_hour = df_hour.fillna(0.0)
X = df_hour.fillna(0.0).values.astype(np.float32)
scaler = preprocessing.StandardScaler()
X = scaler.fit_transform(X)
net.fit(X, X)
# Problem using the pipeline: It does not scale "Y"
# pipe = Pipeline([
# ('scale', StandardScaler()),
# ('net', net),
# ])
# pipe.fit(X, X)
#X = np.expand_dims(X, axis=1)
#X = df_hour.fillna(0.0).values.astype(np.float32)
#y_proba = net.predict_proba(X)
# -
scaler.inverse_transform(net.predict(X)[0])
#net.forward(X)
# # Tests
# In[7]:
# +
# PLAYGROUND
input = torch.randn(1, 1, 2880)
inputnet = torch.randn(1, 2880)
c = nn.Conv1d(in_channels=1, out_channels=4, kernel_size=20, stride=10, padding=0)
c2 = nn.Conv1d(in_channels=4, out_channels=8, kernel_size=6, stride=4, padding=0)
mp = nn.MaxPool1d(kernel_size=2,stride=2)
res = c(input)
print("CNN1:", res.shape)
res = c2(res)
print("CNN2:", res.shape)
#res = c2(c(input.unsqueeze(1))) #.view(-1).shape
#mp(res).shape, res.shape
#res.view(res.size(0), -1).shape
# print(mp(c2(mp(c(input.unsqueeze(1))))).shape)
#c(input.unsqueeze(0))
e = ConvEncoder()
out = e(inputnet)
print(out.shape)
d = ConvDecoder()
d(out).shape
# dc = nn.ConvTranspose1d(in_channels=8, out_channels=4, kernel_size=6, stride=4, padding=0)
# dec1 = dc(out)
# print(dec1.shape)
# dc2 = nn.ConvTranspose1d(in_channels=4, out_channels=1, kernel_size=10, stride=30, padding=0)
# dc2(dec1).shape
#ae = AutoEncoder()
#ae.fit()
#X.sum(axis=2)
#X.shape
# # Pytorch Lightning <a id='pytorch_lightning'></a>
# ## Encoder/Decoder definition
# In[774]:
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader, random_split, dataset
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.preprocessing import OneHotEncoder
import pytorch_lightning as pl
class AESkipConnection(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(2880, 1440)
self.linear2 = nn.Linear(1440, 360)
self.linear3 = nn.Linear(360, 90)
self.linear4 = nn.Linear(90, 30)
self.linear5 = nn.Linear(30, 90)
self.linear6 = nn.Linear(90, 360)
self.linear7 = nn.Linear(360, 1440)
self.linear8 = nn.Linear(1440, 2880)
def forward(self, X):
### Encoder
l1_out = self.linear1(X)
out = F.relu(l1_out)
l2_out = self.linear2(out)
out = F.relu(l2_out)
l3_out = self.linear3(out)
out = F.relu(l3_out)
l4_out = self.linear4(out)
out = F.relu(l4_out)
### Decoder
out = self.linear5(out)
out += l3_out
out = F.relu(out)
# out = torch.cat((out, l1_out), 1)
out = self.linear6(out)
out += l2_out
out = F.relu(out)
# out = torch.cat((out, l2_out), 1)
out = self.linear7(out)
out += l1_out
out = F.relu(out)
out = self.linear8(out)
out = F.relu(out)
return out
# ===== Encoders for 2280-D ========#
class LinearEncoder2880(nn.Module):
def __init__(self):
super().__init__()
self.encode = nn.Sequential(
nn.Linear(2880, 1440), nn.ReLU(),
nn.Linear(1440, 360), nn.ReLU(),
nn.Linear(360, 90), nn.ReLU(),
nn.Linear(90, 30), nn.ReLU(),
)
def forward(self, X):
encoded = self.encode(X)
return encoded
class LinearDecoder2880(nn.Module):
def __init__(self):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(30, 90), nn.ReLU(),
nn.Linear(90, 360), nn.ReLU(),
nn.Linear(360, 1440), nn.ReLU(),
nn.Linear(1440, 2880),
)
def forward(self, X):
decoded = self.decode(X)
return decoded
class LinearEncoder120(nn.Module):
def __init__(self, label_dim=24):
super().__init__()
self.encode = nn.Sequential(
nn.Linear(120+label_dim, 64), nn.ReLU(),
nn.Linear(64, 32), nn.ReLU(),
nn.Linear(32, 16), nn.ReLU(),
nn.Linear(16, 8), nn.ReLU(),
)
def forward(self, X):
encoded = self.encode(X)
return encoded
class LinearDecoder120(nn.Module):
def __init__(self, label_dim=24):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(8, 16), nn.ReLU(),
nn.Linear(16, 32), nn.ReLU(),
nn.Linear(32, 64), nn.ReLU(),
nn.Linear(64, 120+label_dim), nn.ReLU(),
)
def forward(self, X):
decoded = self.decode(X)
return decoded
class LinearEncoder24(nn.Module):
def __init__(self):
super().__init__()
self.encode = nn.Sequential(
nn.Linear(24, 16), nn.ReLU(),
nn.Linear(16, 8), nn.ReLU(),
nn.Linear(8, 4)
)
def forward(self, X):
encoded = self.encode(X)
return encoded
class LinearDecoder24(nn.Module):
def __init__(self):
super().__init__()
self.decode = nn.Sequential(
nn.Linear(4, 8), nn.ReLU(),
nn.Linear(8, 16), nn.ReLU(),
nn.Linear(16, 24)
)
def forward(self, X):
decoded = self.decode(X)
return decoded
# x = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
# x = layers.MaxPooling2D((2, 2), padding='same')(x)
# x = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(x)
# x = layers.MaxPooling2D((2, 2), padding='same')(x)
class ConvEncoder(nn.Module):
def __init__(self):
super().__init__()
self.encode = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=2), # torch.Size([n, 4, 6, 1])
nn.Flatten(), # torch.Size([n, 24])
nn.Linear(24, 8)
)
def forward(self, X):
# print(X.shape)
# print(X)
encoded = self.encode(X)
return encoded
class ConvDecoder(nn.Module):
def __init__(self):
super().__init__()
# Received input size is (n, 168)
# nn.ConvTranspose2d has learnable parameters
# where as UpSample2d does not
self.decode = nn.Sequential(
nn.Linear(8, 24), nn.ReLU(),
nn.Unflatten(dim=1, unflattened_size=(4, 6, 1)), # dim is axis in numpy terms
nn.ConvTranspose2d(in_channels=4, out_channels=2, kernel_size=2, padding=0, stride=2), nn.ReLU(),
nn.ConvTranspose2d(in_channels=2, out_channels=1, kernel_size=2, padding=0, stride=2),
)
def forward(self, X):
decoded = self.decode(X)
return decoded
# In[775]:
# x = torch.randn(1, 1, 24, 4)
# m = nn.Sequential(
# nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2, padding=1), nn.ReLU(),
# nn.MaxPool2d(kernel_size=2),
# nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2, padding=1), nn.ReLU(),
# nn.MaxPool2d(kernel_size=2), # torch.Size([n, 4, 6, 1])
# nn.Flatten(), # torch.Size([n, 96])
# nn.Linear(24, 8),
# nn.Linear(8, 24),
# nn.Unflatten(dim=1, unflattened_size=(4, 6, 1)), # dim is axis in numpy terms
# nn.ConvTranspose2d(in_channels=4, out_channels=2, kernel_size=2, padding=0, stride=2), nn.ReLU(),
# nn.ConvTranspose2d(in_channels=2, out_channels=1, kernel_size=2, padding=0, stride=2), nn.ReLU(),
# )
# m(x).shape
# ## AE Definition
# In[776]:
class LitAutoEncoder(pl.LightningModule):
def __init__(self, input_dim=24):
super().__init__()
# self.skipconnection = AESkipConnection()
if(input_dim == 2880):
self.encoder = LinearEncoder2880()
self.decoder = LinearDecoder2880()
elif(input_dim == 24):
self.encoder = LinearEncoder24()
self.decoder = LinearDecoder24()
else:
self.encoder = ConvEncoder()
self.decoder = ConvDecoder()
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
        # training_step defines the train loop.
# It is independent of forward
x, y = batch
# x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('train_loss', loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
# x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('val_loss', loss, on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)
return optimizer
class ResidualAutoEncoder(pl.LightningModule):
def __init__(self):
super().__init__()
self.skipconnection = AESkipConnection()
def forward(self, x):
return self.skipconnection(x)
def training_step(self, batch, batch_idx):
        # training_step defines the train loop.
# It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
x_hat = self.skipconnection(x)
loss = F.mse_loss(x_hat, x)
self.log('train_loss', loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
x_hat = self.skipconnection(x)
loss = F.mse_loss(x_hat, x)
self.log('val_loss', loss, on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)
return optimizer
# ## VAE Definition
# In[777]:
class VAE(pl.LightningModule):
"""
Standard VAE with Gaussian Prior and approx posterior.
Example::
# not pretrained
vae = VAE()
"""
def __init__(
self,
enc_out_dim: int = 4,
kl_coeff: float = 0.1,
latent_dim: int = 4,
lr: float = 1e-2,
input_dim: int = 24,
**kwargs
):
"""
Args:
input_height: height of the images
enc_type: option between resnet18 or resnet50
first_conv: use standard kernel_size 7, stride 2 at start or
replace it with kernel_size 3, stride 1 conv
maxpool1: use standard maxpool to reduce spatial dim of feat by a factor of 2
enc_out_dim: set according to the out_channel count of
encoder used (512 for resnet18, 2048 for resnet50)
kl_coeff: coefficient for kl term of the loss
latent_dim: dim of latent space
lr: learning rate for Adam
"""
super(VAE, self).__init__()
self.save_hyperparameters()
self.lr = lr
self.kl_coeff = kl_coeff
self.enc_out_dim = enc_out_dim
self.latent_dim = latent_dim
self.input_dim = input_dim
if(self.input_dim == 2880):
self.encoder = LinearEncoder2880()
self.decoder = LinearDecoder2880()
elif(self.input_dim == 120):
# CVAE
self.encoder = LinearEncoder120()
self.decoder = LinearDecoder120()
else:
self.encoder = LinearEncoder24()
self.decoder = LinearDecoder24()
self.fc_mu = nn.Linear(self.enc_out_dim, self.latent_dim)
self.fc_var = nn.Linear(self.enc_out_dim, self.latent_dim)
def forward(self, x):
x = self.encoder(x)
# mu = self.fc_mu(x)
# log_var = self.fc_var(x)
# p, q, z = self.sample(mu, log_var)
# return self.decoder(z)
return x
def _run_step(self, x):
x = self.encoder(x)
mu = self.fc_mu(x)
log_var = self.fc_var(x)
p, q, z = self.sample(mu, log_var)
return z, self.decoder(z), p, q
def sample(self, mu, log_var):
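        # Reparameterisation trick: treat the encoder outputs as the mean and
        # log-variance of a diagonal Gaussian, so std = exp(log_var / 2), and
        # draw z with rsample() so the sample stays differentiable w.r.t. mu/std.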
std = torch.exp(log_var / 2)
p = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(std))
q = torch.distributions.Normal(mu, std)
z = q.rsample()
return p, q, z
def step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
z, x_hat, p, q = self._run_step(x)
recon_loss = F.mse_loss(x_hat, x, reduction='mean')
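        # KL(q||p) is estimated by Monte Carlo from the single sample z,
        # i.e. log q(z) - log p(z) averaged over the batch below.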
log_qz = q.log_prob(z)
log_pz = p.log_prob(z)
kl = log_qz - log_pz
kl = kl.mean()
kl *= self.kl_coeff
loss = kl + recon_loss
logs = {
"recon_loss": recon_loss,
"kl": kl,
"loss": loss,
}
return loss, logs
def training_step(self, batch, batch_idx):
loss, logs = self.step(batch, batch_idx)
self.log_dict({f"train_{k}": v for k, v in logs.items()}, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
loss, logs = self.step(batch, batch_idx)
self.log_dict({f"val_{k}": v for k, v in logs.items()}, on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
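# A minimal training sketch for the Lightning modules above (not executed here);
# `train_loader` / `val_loader` are assumed to be PyTorch DataLoaders yielding
# (x, y) batches and are defined elsewhere, not in this cell.
# model = VAE(input_dim=24)  # or LitAutoEncoder(input_dim=24)
# trainer = pl.Trainer(max_epochs=50)
# trainer.fit(model, train_loader, val_loader)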
# # Datasets
# In[781]:
# train_ids = pd.read_csv('acm_health_sleep_data-main/processed_hchs/HCHS_pid_train.csv')
# test_ids = pd.read_csv('acm_health_sleep_data-main/processed_hchs/HCHS_pid_test.csv')
train_ids = pd.read_csv('acm_health_sleep_data-main/processed_mesa/MESA_pid_train.csv')
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from dask import dataframe as dd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime, Double, Integer
import featuretools as ft
from featuretools import Timedelta
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import DirectFeature, IdentityFeature
from featuretools.primitives import (
And,
Count,
CumSum,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
LessThanEqualToScalar,
LessThanScalar,
Mean,
Min,
Mode,
Negate,
NMostCommon,
NotEqualScalar,
NumTrue,
Sum,
TimeSinceLast,
Trend
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.tests.testing_utils import backward_path, to_pandas
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library
def test_make_identity(es):
f = IdentityFeature(es['log'].ww['datetime'])
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == datetime(2011, 4, 9, 10, 30, 0))
def test_make_dfeat(es):
f = DirectFeature(ft.Feature(es['customers'].ww['age']),
child_dataframe_name='sessions')
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert (v == 33)
def test_make_agg_feat_of_identity_column(es):
agg_feat = ft.Feature(es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Sum)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 50)
# full_dataframe not supported with Dask
def test_full_dataframe_trans_of_agg(pd_es):
agg_feat = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([1]))
v = df[trans_feat.get_name()].values[0]
assert v == 82
def test_full_dataframe_error_dask(dask_es):
agg_feat = ft.Feature(dask_es['log'].ww['value'], parent_dataframe_name='customers',
primitive=Sum)
trans_feat = ft.Feature(agg_feat, primitive=CumSum)
feature_set = FeatureSet([trans_feat])
calculator = FeatureSetCalculator(dask_es,
time_last=None,
feature_set=feature_set)
error_text = "Cannot use primitives that require full dataframe with Dask"
with pytest.raises(ValueError, match=error_text):
calculator.run(np.array([1]))
def test_make_agg_feat_of_identity_index_column(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 5)
def test_make_agg_feat_where_count(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=IdentityFeature(es['log'].ww['product_id']) == 'coke zero',
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 3)
def test_make_agg_feat_using_prev_time(es):
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
use_previous=Timedelta(10, 's'),
primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 10),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 2)
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[agg_feat.get_name()][0]
assert (v == 1)
def test_make_agg_feat_using_prev_n_events(es):
if es.dataframe_type != Library.PANDAS.value:
        pytest.xfail('Distributed entitysets do not support use_previous')
agg_feat_1 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(1, 'observations'),
primitive=Min)
agg_feat_2 = ft.Feature(es['log'].ww['value'],
parent_dataframe_name='sessions',
use_previous=Timedelta(3, 'observations'),
primitive=Min)
assert agg_feat_1.get_name() != agg_feat_2.get_name(), \
'Features should have different names based on use_previous'
feature_set = FeatureSet([agg_feat_1, agg_feat_2])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 6),
feature_set=feature_set)
df = calculator.run(np.array([0]))
# time_last is included by default
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 5
assert v2 == 0
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 9, 10, 30, 30),
feature_set=feature_set)
df = calculator.run(np.array([0]))
v1 = df[agg_feat_1.get_name()][0]
v2 = df[agg_feat_2.get_name()][0]
assert v1 == 20
assert v2 == 10
def test_make_agg_feat_multiple_dtypes(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('Currently no Dask or Koalas compatible agg prims that use multiple dtypes')
compare_prod = IdentityFeature(es['log'].ww['product_id']) == 'coke zero'
agg_feat = ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Count)
agg_feat2 = ft.Feature(es['log'].ww['product_id'],
parent_dataframe_name='sessions',
where=compare_prod,
primitive=Mode)
feature_set = FeatureSet([agg_feat, agg_feat2])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
v = df[agg_feat.get_name()][0]
v2 = df[agg_feat2.get_name()][0]
assert (v == 3)
assert (v2 == 'coke zero')
def test_make_agg_feat_where_different_identity_feat(es):
feats = []
where_cmps = [LessThanScalar, GreaterThanScalar, LessThanEqualToScalar,
GreaterThanEqualToScalar, EqualScalar, NotEqualScalar]
for where_cmp in where_cmps:
feats.append(ft.Feature(es['log'].ww['id'],
parent_dataframe_name='sessions',
where=ft.Feature(es['log'].ww['datetime'], primitive=where_cmp(datetime(2011, 4, 10, 10, 40, 1))),
primitive=Count))
df = ft.calculate_feature_matrix(entityset=es, features=feats, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, where_cmp in enumerate(where_cmps):
name = feats[i].get_name()
instances = df[name]
v0, v1, v2, v3 = instances[0:4]
if where_cmp == LessThanScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
elif where_cmp == GreaterThanScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 0)
elif where_cmp == LessThanEqualToScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 2)
elif where_cmp == GreaterThanEqualToScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == EqualScalar:
assert (v0 == 0)
assert (v1 == 0)
assert (v2 == 0)
assert (v3 == 1)
elif where_cmp == NotEqualScalar:
assert (v0 == 5)
assert (v1 == 4)
assert (v2 == 1)
assert (v3 == 1)
def test_make_agg_feat_of_grandchild_dataframe(es):
agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([agg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[agg_feat.get_name()].values[0]
assert (v == 10)
def test_make_agg_feat_where_count_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
    number of logs in the session is greater than 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=log_count_feat > 1,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1 = instances[0:2]
assert (v0 == 2)
assert (v1 == 2)
def test_make_compare_feat(es):
"""
Feature we're creating is:
    Whether the number of logs in each session is greater than the
    customer's average number of logs per session
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
mean_agg_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Mean)
mean_feat = DirectFeature(mean_agg_feat, child_dataframe_name='sessions')
feat = log_count_feat > mean_feat
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
name = feat.get_name()
instances = df[name]
v0, v1, v2 = instances[0:3]
assert v0
assert v1
assert not v2
def test_make_agg_feat_where_count_and_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
    number of logs in the session equals 1 and the device type is 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat == 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
and_feat = ft.Feature([compare_count, compare_device_type], primitive=And)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=and_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 1)
def test_make_agg_feat_where_count_or_device_type_feat(es):
"""
Feature we're creating is:
Number of sessions for each customer where the
    number of logs in the session is greater than 1 or the device type is 1
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
compare_count = log_count_feat > 1
compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1
or_feat = compare_count.OR(compare_device_type)
feat = ft.Feature(es['sessions'].ww['id'],
parent_dataframe_name='customers',
where=or_feat,
primitive=Count)
feature_set = FeatureSet([feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id', int_index=True)
name = feat.get_name()
instances = df[name]
assert (instances.values[0] == 3)
def test_make_agg_feat_of_agg_feat(es):
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
customer_sum_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Sum)
feature_set = FeatureSet([customer_sum_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[customer_sum_feat.get_name()].values[0]
assert (v == 10)
@pytest.fixture
def pd_df():
return pd.DataFrame({
"id": ["a", "b", "c", "d", "e"],
"e1": ["h", "h", "i", "i", "j"],
"e2": ["x", "x", "y", "y", "x"],
"e3": ["z", "z", "z", "z", "z"],
"val": [1, 1, 1, 1, 1]
})
@pytest.fixture
def dd_df(pd_df):
return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def ks_df(pd_df):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
return ks.from_pandas(pd_df)
@pytest.fixture(params=['pd_df', 'dd_df', 'ks_df'])
def df(request):
return request.getfixturevalue(request.param)
def test_make_3_stacked_agg_feats(df):
"""
Tests stacking 3 agg features.
The test specifically uses non numeric indices to test how ancestor columns are handled
as dataframes are merged together
"""
if isinstance(df, dd.DataFrame):
        pytest.xfail('normalize_dataframe fails with dask DataFrame')
es = ft.EntitySet()
ltypes = {
'e1': Categorical,
'e2': Categorical,
'e3': Categorical,
'val': Double
}
es.add_dataframe(dataframe=df,
index="id",
dataframe_name="e0",
logical_types=ltypes)
es.normalize_dataframe(base_dataframe_name="e0",
new_dataframe_name="e1",
index="e1",
additional_columns=["e2", "e3"])
es.normalize_dataframe(base_dataframe_name="e1",
new_dataframe_name="e2",
index="e2",
additional_columns=["e3"])
es.normalize_dataframe(base_dataframe_name="e2",
new_dataframe_name="e3",
index="e3")
sum_1 = ft.Feature(es["e0"].ww["val"], parent_dataframe_name="e1", primitive=Sum)
sum_2 = ft.Feature(sum_1, parent_dataframe_name="e2", primitive=Sum)
sum_3 = ft.Feature(sum_2, parent_dataframe_name="e3", primitive=Sum)
feature_set = FeatureSet([sum_3])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(["z"]))
v = df[sum_3.get_name()][0]
assert (v == 5)
def test_make_dfeat_of_agg_feat_on_self(es):
"""
The graph looks like this:
R R = Regions, a parent of customers
|
C C = Customers, the dataframe we're trying to predict on
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on C.
"""
customer_count_feat = ft.Feature(es['customers'].ww['id'], parent_dataframe_name=u'régions', primitive=Count)
num_customers_feat = DirectFeature(customer_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_customers_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_customers_feat.get_name()].values[0]
assert (v == 3)
def test_make_dfeat_of_agg_feat_through_parent(es):
"""
The graph looks like this:
R C = Customers, the dataframe we're trying to predict on
/ \\ R = Regions, a parent of customers
S C S = Stores, a child of regions
|
etc.
We're trying to calculate a DFeat from C to R on an agg_feat of R on S.
"""
store_id_feat = IdentityFeature(es['stores'].ww['id'])
store_count_feat = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)
num_stores_feat = DirectFeature(store_count_feat, child_dataframe_name='customers')
feature_set = FeatureSet([num_stores_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[num_stores_feat.get_name()].values[0]
assert (v == 3)
def test_make_deep_agg_feat_of_dfeat_of_agg_feat(es):
"""
The graph looks like this (higher implies parent):
C C = Customers, the dataframe we're trying to predict on
| S = Sessions, a child of Customers
    P   S L = Log, a child of both Sessions and Products
     \\ / P = Products, a parent of Log which is not a descendant of customers
L
We're trying to calculate a DFeat from L to P on an agg_feat of P on L, and
then aggregate it with another agg_feat of C on L.
"""
log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='products', primitive=Count)
product_purchases_feat = DirectFeature(log_count_feat,
child_dataframe_name='log')
purchase_popularity = ft.Feature(product_purchases_feat, parent_dataframe_name='customers', primitive=Mean)
feature_set = FeatureSet([purchase_popularity])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0]))
df = to_pandas(df, index='id')
v = df[purchase_popularity.get_name()].values[0]
assert (v == 38.0 / 10.0)
def test_deep_agg_feat_chain(es):
"""
Agg feat of agg feat:
region.Mean(customer.Count(Log))
"""
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
region_avg_feat = ft.Feature(customer_count_feat, parent_dataframe_name=u'régions', primitive=Mean)
feature_set = FeatureSet([region_avg_feat])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array(['United States']))
df = to_pandas(df, index='id')
v = df[region_avg_feat.get_name()][0]
assert (v == 17 / 3.)
# NMostCommon not supported with Dask or Koalas
def test_topn(pd_es):
topn = ft.Feature(pd_es['log'].ww['product_id'],
parent_dataframe_name='customers',
primitive=NMostCommon(n=2))
feature_set = FeatureSet([topn])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = pd.DataFrame([
['toothpaste', 'coke zero'],
['coke zero', 'Haribo sugar-free gummy bears'],
['taco clock', np.nan]
])
assert ([name in df.columns for name in topn.get_feature_names()])
for i in range(df.shape[0]):
true = true_results.loc[i]
actual = df.loc[i]
if i == 0:
            # coke zero and toothpaste have same number of occurrences
assert set(true.values) == set(actual.values)
else:
for i1, i2 in zip(true, actual):
assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
# Trend not supported with Dask or Koalas
def test_trend(pd_es):
trend = ft.Feature([ft.Feature(pd_es['log'].ww['value']), ft.Feature(pd_es['log'].ww['datetime'])],
parent_dataframe_name='customers',
primitive=Trend)
feature_set = FeatureSet([trend])
calculator = FeatureSetCalculator(pd_es,
time_last=None,
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
true_results = [-0.812730, 4.870378, np.nan]
np.testing.assert_almost_equal(df[trend.get_name()].tolist(), true_results, decimal=5)
def test_direct_squared(es):
feature = IdentityFeature(es['log'].ww['value'])
squared = feature * feature
feature_set = FeatureSet([feature, squared])
calculator = FeatureSetCalculator(es,
time_last=None,
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0, 1, 2])))
for i, row in df.iterrows():
assert (row[0] * row[0]) == row[1]
def test_agg_empty_child(es):
customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)
feature_set = FeatureSet([customer_count_feat])
# time last before the customer had any events, so child frame is empty
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])), index='id')
assert df["COUNT(log)"].iloc[0] == 0
def test_diamond_entityset(diamond_es):
es = diamond_es
amount = ft.IdentityFeature(es['transactions'].ww['amount'])
path = backward_path(es, ['regions', 'customers', 'transactions'])
through_customers = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
path = backward_path(es, ['regions', 'stores', 'transactions'])
through_stores = ft.AggregationFeature(amount, 'regions',
primitive=ft.primitives.Sum,
relationship_path=path)
feature_set = FeatureSet([through_customers, through_stores])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 4, 8),
feature_set=feature_set)
df = calculator.run(np.array([0, 1, 2]))
df = to_pandas(df, index='id', sort_index=True)
assert (df['SUM(stores.transactions.amount)'] == [94, 261, 128]).all()
assert (df['SUM(customers.transactions.amount)'] == [72, 411, 0]).all()
def test_two_relationships_to_single_dataframe(games_es):
es = games_es
home_team, away_team = es.relationships
path = RelationshipPath([(False, home_team)])
mean_at_home = ft.AggregationFeature(ft.Feature(es['games'].ww['home_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
path = RelationshipPath([(False, away_team)])
mean_at_away = ft.AggregationFeature(ft.Feature(es['games'].ww['away_team_score']),
'teams',
relationship_path=path,
primitive=ft.primitives.Mean)
home_team_mean = ft.DirectFeature(mean_at_home, 'games',
relationship=home_team)
away_team_mean = ft.DirectFeature(mean_at_away, 'games',
relationship=away_team)
feature_set = FeatureSet([home_team_mean, away_team_mean])
calculator = FeatureSetCalculator(es,
time_last=datetime(2011, 8, 28),
feature_set=feature_set)
df = calculator.run(np.array(range(3)))
df = to_pandas(df, index='id', sort_index=True)
assert (df[home_team_mean.get_name()] == [1.5, 1.5, 2.5]).all()
assert (df[away_team_mean.get_name()] == [1, 0.5, 2]).all()
@pytest.fixture
def pd_parent_child():
parent_df = pd.DataFrame({"id": [1]})
child_df = pd.DataFrame({"id": [1, 2, 3],
"parent_id": [1, 1, 1],
"time_index": pd.date_range(start='1/1/2018', periods=3),
"value": [10, 5, 2],
"cat": ['a', 'a', 'b']}).astype({'cat': 'category'})
return (parent_df, child_df)
@pytest.fixture
def dd_parent_child(pd_parent_child):
parent_df, child_df = pd_parent_child
parent_df = dd.from_pandas(parent_df, npartitions=2)
child_df = dd.from_pandas(child_df, npartitions=2)
return (parent_df, child_df)
@pytest.fixture
def ks_parent_child(pd_parent_child):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
parent_df, child_df = pd_parent_child
parent_df = ks.from_pandas(parent_df)
child_df = ks.from_pandas(child_df)
return (parent_df, child_df)
@pytest.fixture(params=['pd_parent_child', 'dd_parent_child', 'ks_parent_child'])
def parent_child(request):
return request.getfixturevalue(request.param)
def test_empty_child_dataframe(parent_child):
parent_df, child_df = parent_child
child_ltypes = {
'parent_id': Integer,
'time_index': Datetime,
'value': Double,
'cat': Categorical
}
es = ft.EntitySet(id="blah")
es.add_dataframe(dataframe_name="parent",
dataframe=parent_df,
index="id")
es.add_dataframe(dataframe_name="child",
dataframe=child_df,
index="id",
time_index="time_index",
logical_types=child_ltypes)
es.add_relationship("parent", "id", "child", "parent_id")
# create regular agg
count = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", primitive=Count)
# create agg feature that requires multiple arguments
trend = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww['time_index'])],
parent_dataframe_name="parent",
primitive=Trend)
# create multi-output agg feature
n_most_common = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", primitive=NMostCommon)
# create aggs with where
where = ft.Feature(es["child"].ww["value"]) == 1
count_where = ft.Feature(es["child"].ww["id"], parent_dataframe_name="parent", where=where, primitive=Count)
trend_where = ft.Feature([ft.Feature(es["child"].ww["value"]), ft.Feature(es["child"].ww["time_index"])],
parent_dataframe_name="parent",
where=where,
primitive=Trend)
n_most_common_where = ft.Feature(es["child"].ww["cat"], parent_dataframe_name="parent", where=where, primitive=NMostCommon)
if isinstance(parent_df, pd.DataFrame):
features = [count, count_where, trend, trend_where, n_most_common, n_most_common_where]
data = {count.get_name(): pd.Series([0], dtype="Int64"),
count_where.get_name(): pd.Series([0], dtype="Int64"),
trend.get_name(): pd.Series([np.nan], dtype="float"),
trend_where.get_name(): pd.Series([np.nan], dtype="float")}
for name in n_most_common.get_feature_names():
data[name] = pd.Series([np.nan], dtype="category")
for name in n_most_common_where.get_feature_names():
data[name] = pd.Series([np.nan], dtype="category")
else:
features = [count, count_where]
        data = {count.get_name(): pd.Series([0], dtype="Int64"),
                count_where.get_name(): pd.Series([0], dtype="Int64")}
# coding: utf-8
# ## <u> go_chandra - Python </u>
#
# The following code is a script adapted from Gladstone's *go_chandra* IDL script.
#
# The code takes the corrected file from *sso_freeze* (hardwired by user), performs a coordinate transformation on the X-ray emission to wrap the PSF around Jupiter and plots the emission of the poles.
# In[1]:
#Purpose: New public Python pipeline used to produce polar plots of Jupiter's X-ray emission over the full observation and/or over defined time
# intervals. If using plots produced by this pipeline, please cite Weigt et al. (in prep.) where the pipeline is discussed in some
# detail
#Category:
#Authors: <NAME> (<EMAIL>), adapted from Randy Gladstone's 'gochandra' IDL script
"""All the relevant packages are imported for code below"""
import go_chandra_analysis_tools as gca_tools # import the defined functions to analyse Chandra data and perform coordinate transformations
import custom_cmap as make_me_colors # import custom color map script
import label_maker as make_me_labels # import script to label multiple subplots
import numpy as np
import pandas as pd
import scipy
from scipy import interpolate
from astropy.io import ascii
from astropy.io import fits as pyfits
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import colors
import matplotlib.gridspec as gridspec
import os
from datetime import *
"""Setup the font used for plotting"""
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['xtick.labelsize']=14
matplotlib.rcParams['ytick.labelsize']=14
matplotlib.rcParams['agg.path.chunksize'] = 1000000
# AU to meter conversion - useful later on (probably a function built in already)
AU_2_m = 1.49598E+11
AU_2_km = 1.49598E+8
# ### Reading in Chandra Event file, extracting all the relevant info and defining assumptions used in analysis <br>
#
# User is prompted to enter the file path of the corrected event file. The script finds the file from the selected folder and reads in all the relevant headers. The assumptions used for the mapping are also defined here.
# In[2]:
# User prompted to enter the file path of the corrected file
print('')
folder_path = input('Enter file path of event file to be analysed (post correction): ')
print('')
cor_evt_location = []
# Script then searches through the folder looking the filename corresponding to the corrected file
# for file in os.listdir(str(folder_path)):
# if file.startswith("hrcf") and file.endswith("pytest_evt2.fits"):
# cor_evt_location.append(os.path.join(str(folder_path), file))
for file in os.listdir(str(folder_path)):
if file.endswith("pytest_evt2.fits"):
cor_evt_location.append(os.path.join(str(folder_path), file))
detector = os.path.basename(cor_evt_location[0])[0:4]
# File is then read in with relevant header information extracted:
hdulist = pyfits.open(cor_evt_location[0], dtype=float)
matplotlib.rcParams['agg.path.chunksize'] = 10000
img_events=hdulist['EVENTS'].data # the data of the event file
img_head = hdulist[1].header # the header information of the event file
#img_data = hdulist[1].data
bigtime = img_events['time'] # time
bigxarr = img_events['X'] # x position of photons
bigyarr = img_events['Y'] # y position of photons
bigchannel = img_events['pha'] # pha channel the photons were found in
obs_id = img_head['OBS_ID'] # observation id of the event
tstart = img_head['TSTART'] # the start and...
tend = img_head['TSTOP'] #... end time of the observation
# The date of the observation is read in...
datestart = img_head['DATE-OBS']
evt_date = pd.to_datetime(datestart) #... and converted to datetime format to allow the relevant information to be read to...
evt_hour = evt_date.hour
evt_doy = evt_date.strftime('%j')
evt_mins = evt_date.minute
evt_secs = evt_date.second
evt_DOYFRAC = gca_tools.doy_frac(float(evt_doy), float(evt_hour), float(evt_mins), float(evt_secs)) #... calculate a fractional Day of
# Year (DOY) of the observation
ra_centre, ra_centre_rad = img_head['RA_NOM'], np.deg2rad(img_head['RA_NOM']) # the RA of Jupiter at the centre of the chip is read in as...
dec_centre, dec_centre_rad = img_head['DEC_NOM'], np.deg2rad(img_head['DEC_NOM']) #... well as Jupiter's DEC
j_rotrate = np.rad2deg(1.758533641E-4) # Jupiter's rotation rate (converted from rad/s to deg/s)
#sat_rotrate = np.rad2deg(1.637884058E-4) # Saturn's rotation period
hdulist.close()
# Assumptions used for mapping:
if detector == 'acis':
scale = 0.4920
fwhm = 0.8 # FWHM of the HRC-I point spread function (PSF) - in units of arcsec
psfsize = 25 # size of PSF used - in units of arcsec
    alt = 400 # altitude where X-ray emission is assumed to occur in Jupiter's ionosphere - in units of km
else:
scale = 0.13175 # scale used when observing Jupiter using Chandra - in units of arcsec/pixel
fwhm = 0.8 # FWHM of the HRC-I point spread function (PSF) - in units of arcsec
psfsize = 25 # size of PSF used - in units of arcsec
    alt = 400 # altitude where X-ray emission is assumed to occur in Jupiter's ionosphere - in units of km
# ### Reading in Jupiter Horizons file
#
# The algorithm uses the start and end dates from the observation to generate an ephemeris file (from the JPL Horizons server) to use for analysis. The ephemeris file used takes CXO as the observer
# In[3]:
"""Brad's horizons code to extract the ephemeris file"""
from astropy.time import Time #convert between different time coordinates
from astropy.time import TimeDelta #add/subtract time intervals
#-*- coding: utf-8 -*-
from astroquery.jplhorizons import Horizons #automatically download ephemeris
#Need to do this to fix astroquery bug, otherwise it won't find the ephemeris data
from astroquery.jplhorizons import conf
conf.horizons_server = 'https://ssd.jpl.nasa.gov/horizons_batch.cgi'
# The start and end times are taken from the event file header and converted to build the Horizons query window.
tstart_eph=Time(tstart, format='cxcsec')
tstop_eph=Time(tend, format='cxcsec')
eph_tstart = Time(tstart_eph, out_subfmt='date_hm')
dt = TimeDelta(0.125, format='jd')
eph_tstop = Time(tstop_eph + dt, out_subfmt='date_hm')
# Below sets the parameters of which observer the ephemeris file is generated from. For example, '500' = centre of the Earth, '500@-151' = CXO
obj = Horizons(id=599,location='500@-151',epochs={'start':eph_tstart.iso, 'stop':eph_tstop.iso, 'step':'1m'}, id_type='majorbody')
eph_jup = obj.ephemerides()
# Extracts relevant information needed from ephemeris file
cml_spline_jup = scipy.interpolate.UnivariateSpline(eph_jup['datetime_jd'], eph_jup['PDObsLon'],k=1)
lt_jup = eph_jup['lighttime']
sub_obs_lon_jup = eph_jup['PDObsLon']
sub_obs_lat_jup = eph_jup['PDObsLat']
eph_dates = pd.to_datetime(eph_jup['datetime_str'])
eph_dates = pd.DatetimeIndex(eph_dates)
eph_doy = np.array(eph_dates.strftime('%j')).astype(int)
eph_hours = eph_dates.hour
eph_minutes = eph_dates.minute
eph_seconds = eph_dates.second
eph_DOYFRAC_jup = gca_tools.doy_frac(eph_doy, eph_hours, eph_minutes, eph_seconds) # DOY fraction from ephemeris data
jup_time = (eph_DOYFRAC_jup - evt_DOYFRAC)*86400.0 + tstart # local time of Jupiter
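# (eph_DOYFRAC_jup - evt_DOYFRAC) is a difference in days, so multiplying by 86400 s/day and adding tstart
# puts the ephemeris timestamps on the same mission elapsed-time scale (seconds) as the photon arrival times.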
# ### Select Region for analysis
#
# Plots the photons' (x,y) positions on a grid of defined size in arcseconds (defaulted to [-50,50] in both x and y). Jupiter is centred on the HRC instrument. The photon information from the defined region is then stored in a dataframe for analysis.
# In[4]:
# converting the x and y coordinates from the event file into arcseconds
# Aimpoint of observations -> HRC: (16384.5, 16384.5), ACIS: (4096.5, 4096.5)
if detector == 'acis':
bigxarr_region = (bigxarr - 4096.5)*scale
bigyarr_region = (bigyarr - 4096.5)*scale
xlimits, ylimits = [-30,30], [-30,30]
else:
bigxarr_region = (bigxarr - 16384.5)*scale
bigyarr_region = (bigyarr - 16384.5)*scale
xlimits, ylimits = [-50,50], [-50,50]
# define the x, y, and pha channel limits (0-90 is default here)
cha_min = 0
cha_max = 90 # default 90
# the photon data is stored in a pandas dataframe
evt_df = pd.DataFrame({'time': bigtime, 'x': bigxarr, 'y': bigyarr, 'pha': bigchannel})
import ffn
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal as aae
try:
df = pd.read_csv('tests/data/test_data.csv', index_col=0, parse_dates=True)
except FileNotFoundError as e:
try:
df = pd.read_csv('data/test_data.csv', index_col=0, parse_dates=True)
except FileNotFoundError as e2:
        raise e2
ts = df['AAPL'][0:10]
def test_to_returns_ts():
data = ts
actual = data.to_returns()
assert len(actual) == len(data)
assert np.isnan(actual[0])
aae(actual[1], -0.019, 3)
aae(actual[9], -0.022, 3)
def test_to_returns_df():
data = df
actual = data.to_returns()
assert len(actual) == len(data)
assert all(np.isnan(actual.iloc[0]))
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['AAPL'][9], -0.022, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['MSFT'][9], -0.014, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['C'][9], 0.004, 3)
def test_to_log_returns_ts():
data = ts
actual = data.to_log_returns()
assert len(actual) == len(data)
assert np.isnan(actual[0])
aae(actual[1], -0.019, 3)
aae(actual[9], -0.022, 3)
def test_to_log_returns_df():
data = df
actual = data.to_log_returns()
assert len(actual) == len(data)
assert all(np.isnan(actual.iloc[0]))
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['AAPL'][9], -0.022, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['MSFT'][9], -0.014, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['C'][9], 0.004, 3)
def test_to_price_index():
data = df
rets = data.to_returns()
actual = rets.to_price_index()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 100, 3)
aae(actual['MSFT'][0], 100, 3)
aae(actual['C'][0], 100, 3)
aae(actual['AAPL'][9], 91.366, 3)
aae(actual['MSFT'][9], 95.191, 3)
aae(actual['C'][9], 101.199, 3)
actual = rets.to_price_index(start=1)
assert len(actual) == len(data)
aae(actual['AAPL'][0], 1, 3)
aae(actual['MSFT'][0], 1, 3)
aae(actual['C'][0], 1, 3)
aae(actual['AAPL'][9], 0.914, 3)
aae(actual['MSFT'][9], 0.952, 3)
aae(actual['C'][9], 1.012, 3)
def test_rebase():
data = df
actual = data.rebase()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 100, 3)
aae(actual['MSFT'][0], 100, 3)
aae(actual['C'][0], 100, 3)
aae(actual['AAPL'][9], 91.366, 3)
aae(actual['MSFT'][9], 95.191, 3)
aae(actual['C'][9], 101.199, 3)
def test_to_drawdown_series_ts():
data = ts
actual = data.to_drawdown_series()
assert len(actual) == len(data)
aae(actual[0], 0, 3)
aae(actual[1], -0.019, 3)
aae(actual[9], -0.086, 3)
def test_to_drawdown_series_df():
data = df
actual = data.to_drawdown_series()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 0, 3)
aae(actual['MSFT'][0], 0, 3)
aae(actual['C'][0], 0, 3)
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['AAPL'][9], -0.086, 3)
aae(actual['MSFT'][9], -0.048, 3)
aae(actual['C'][9], -0.029, 3)
def test_max_drawdown_ts():
data = ts
actual = data.calc_max_drawdown()
aae(actual, -0.086, 3)
def test_max_drawdown_df():
data = df
data = data[0:10]
actual = data.calc_max_drawdown()
aae(actual['AAPL'], -0.086, 3)
aae(actual['MSFT'], -0.048, 3)
aae(actual['C'], -0.033, 3)
def test_year_frac():
actual = ffn.year_frac(pd.to_datetime('2004-03-10'),
pd.to_datetime('2004-03-29'))
# not exactly the same as excel but close enough
aae(actual, 0.0520, 4)
def test_cagr_ts():
data = ts
actual = data.calc_cagr()
aae(actual, -0.921, 3)
def test_cagr_df():
data = df
actual = data.calc_cagr()
aae(actual['AAPL'], 0.440, 3)
aae(actual['MSFT'], 0.041, 3)
aae(actual['C'], -0.205, 3)
def test_merge():
a = pd.Series(index=pd.date_range('2010-01-01', periods=5),
data=100, name='a')
b = pd.Series(index=pd.date_range('2010-01-02', periods=5),
data=200, name='b')
actual = ffn.merge(a, b)
assert 'a' in actual
assert 'b' in actual
assert len(actual) == 6
assert len(actual.columns) == 2
assert np.isnan(actual['a'][-1])
assert np.isnan(actual['b'][0])
assert actual['a'][0] == 100
assert actual['a'][1] == 100
assert actual['b'][-1] == 200
assert actual['b'][1] == 200
old = actual
old.columns = ['c', 'd']
actual = ffn.merge(old, a, b)
assert 'a' in actual
assert 'b' in actual
assert 'c' in actual
assert 'd' in actual
assert len(actual) == 6
assert len(actual.columns) == 4
assert np.isnan(actual['a'][-1])
assert np.isnan(actual['b'][0])
assert actual['a'][0] == 100
assert actual['a'][1] == 100
assert actual['b'][-1] == 200
assert actual['b'][1] == 200
def test_calc_inv_vol_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_inv_vol_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.218, 3)
aae(actual['MSFT'], 0.464, 3)
aae(actual['C'], 0.318, 3)
def test_calc_mean_var_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_mean_var_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.000, 3)
aae(actual['MSFT'], 0.000, 3)
aae(actual['C'], 1.000, 3)
def test_calc_erc_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_erc_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.270, 3)
aae(actual['MSFT'], 0.374, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='ledoit-wolf',
risk_parity_method='slsqp',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.270, 3)
aae(actual['MSFT'], 0.374, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='standard',
risk_parity_method='ccd',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.234, 3)
aae(actual['MSFT'], 0.409, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='standard',
risk_parity_method='slsqp',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.234, 3)
aae(actual['MSFT'], 0.409, 3)
aae(actual['C'], 0.356, 3)
def test_calc_total_return():
prc = df.iloc[0:11]
actual = prc.calc_total_return()
assert len(actual) == 3
aae(actual['AAPL'], -0.079, 3)
aae(actual['MSFT'], -0.038, 3)
aae(actual['C'], 0.012, 3)
def test_get_num_days_required():
actual = ffn.core.get_num_days_required(pd.DateOffset(months=3),
perc_required=1.)
assert actual >= 60
actual = ffn.core.get_num_days_required(pd.DateOffset(months=3),
perc_required=1.,
period='m')
assert actual >= 3
def test_asfreq_actual():
a = pd.Series({pd.to_datetime('2010-02-27'): 100,
pd.to_datetime('2010-03-25'): 200})
actual = a.asfreq_actual(freq='M', method='ffill')
assert len(actual) == 1
assert '2010-02-27' in actual
def test_to_monthly():
a = pd.Series(range(100), index=pd.date_range(
'2010-01-01', periods=100))
# to test for actual dates
a['2010-01-31'] = np.nan
a = a.dropna()
actual = a.to_monthly()
assert len(actual) == 3
assert '2010-01-30' in actual
assert actual['2010-01-30'] == 29
def test_drop_duplicate_cols():
a = pd.Series(index=pd.date_range('2010-01-01', periods=5),
data=100, name='a')
# second version of a w/ less data
a2 = pd.Series(index=pd.date_range('2010-01-02', periods=4),
data=900, name='a')
b = pd.Series(index=pd.date_range('2010-01-02', periods=5),
data=200, name='b')
actual = ffn.merge(a, a2, b)
assert actual['a'].shape[1] == 2
assert len(actual.columns) == 3
actual = actual.drop_duplicate_cols()
assert len(actual.columns) == 2
assert 'a' in actual
assert 'b' in actual
assert len(actual['a'].dropna()) == 5
def test_limit_weights():
w = {'a': 0.3, 'b': 0.1,
'c': 0.05, 'd': 0.05, 'e': 0.5}
actual_exp = {'a': 0.3, 'b': 0.2, 'c': 0.1,
'd': 0.1, 'e': 0.3}
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
for k in actual_exp:
assert actual[k] == actual_exp[k]
w = pd.Series(w)
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
for k in actual_exp:
assert actual[k] == actual_exp[k]
w = pd.Series({'a': 0.29, 'b': 0.1,
'c': 0.06, 'd': 0.05, 'e': 0.5})
assert w.sum() == 1.0
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
assert all(x <= 0.3 for x in actual)
aae(actual['a'], 0.300, 3)
aae(actual['b'], 0.190, 3)
aae(actual['c'], 0.114, 3)
aae(actual['d'], 0.095, 3)
aae(actual['e'], 0.300, 3)
def test_random_weights():
n = 10
bounds = (0., 1.)
tot = 1.0000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(lambda x: (x >= low and x <= high)).all().all()
n = 4
bounds = (0., 0.25)
tot = 1.0000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
n = 7
bounds = (0., 0.25)
tot = 0.8000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
n = 10
bounds = (-.25, 0.25)
tot = 0.0
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
def test_random_weights_throws_error():
try:
ffn.random_weights(2, (0., 0.25), 1.0)
assert False
except ValueError:
assert True
try:
ffn.random_weights(10, (0.5, 0.25), 1.0)
assert False
except ValueError:
assert True
try:
ffn.random_weights(10, (0.5, 0.75), 0.2)
assert False
except ValueError:
assert True
def test_rollapply():
a = pd.Series([1, 2, 3, 4, 5])
actual = a.rollapply(3, np.mean)
assert np.isnan(actual[0])
assert np.isnan(actual[1])
assert actual[2] == 2
assert actual[3] == 3
assert actual[4] == 4
b = pd.DataFrame({'a': a, 'b': a})
actual = b.rollapply(3, np.mean)
assert all(np.isnan(actual.iloc[0]))
assert all(np.isnan(actual.iloc[1]))
assert all(actual.iloc[2] == 2)
assert all(actual.iloc[3] == 3)
assert all(actual.iloc[4] == 4)
def test_winsorize():
x = pd.Series(range(20), dtype='float')
res = x.winsorize(limits=0.05)
assert res.iloc[0] == 1
assert res.iloc[-1] == 18
# make sure initial values still intact
assert x.iloc[0] == 0
assert x.iloc[-1] == 19
x = pd.DataFrame({
'a': pd.Series(range(20), dtype='float'),
'b': pd.Series(range(20), dtype='float')
})
res = x.winsorize(axis=0, limits=0.05)
assert res['a'].iloc[0] == 1
assert res['b'].iloc[0] == 1
assert res['a'].iloc[-1] == 18
assert res['b'].iloc[-1] == 18
assert x['a'].iloc[0] == 0
assert x['b'].iloc[0] == 0
assert x['a'].iloc[-1] == 19
assert x['b'].iloc[-1] == 19
def test_rescale():
x = pd.Series(range(10), dtype='float')
res = x.rescale()
assert res.iloc[0] == 0
assert res.iloc[4] == (4. - 0.) / (9. - 0.)
assert res.iloc[-1] == 1
assert x.iloc[0] == 0
assert x.iloc[4] == 4
assert x.iloc[-1] == 9
x = pd.DataFrame({
'a': pd.Series(range(10), dtype='float'),
'b': pd.Series(range(10), dtype='float')
})
res = x.rescale(axis=0)
assert res['a'].iloc[0] == 0
assert res['a'].iloc[4] == (4. - 0.) / (9. - 0.)
assert res['a'].iloc[-1] == 1
assert res['b'].iloc[0] == 0
assert res['b'].iloc[4] == (4. - 0.) / (9. - 0.)
assert res['b'].iloc[-1] == 1
assert x['a'].iloc[0] == 0
assert x['a'].iloc[4] == 4
assert x['a'].iloc[-1] == 9
assert x['b'].iloc[0] == 0
assert x['b'].iloc[4] == 4
assert x['b'].iloc[-1] == 9
def test_annualize():
assert ffn.annualize(0.1, 60) == (1.1 ** (1. / (60. / 365)) - 1)
def test_calc_sortino_ratio():
rf = 0
p = 1
r = df.to_returns()
a = r.calc_sortino_ratio(rf=rf, nperiods=p)
negative_returns = np.minimum(r[1:], 0)
assert np.allclose(a, np.divide((r.mean() - rf), np.std(negative_returns, ddof=1)) * np.sqrt(p))
a = r.calc_sortino_ratio()
negative_returns = np.minimum(r[1:], 0)
assert np.allclose(a, np.divide((r.mean() - rf), np.std(negative_returns, ddof=1)) * np.sqrt(p))
rf = 0.02
p = 252
r = df.to_returns()
er = r.to_excess_returns(rf, nperiods=p)
a = r.calc_sortino_ratio(rf=rf, nperiods=p)
negative_returns = np.minimum(r[1:], 0)
assert np.allclose(a, np.divide(er.mean(), np.std(negative_returns, ddof=1)) * np.sqrt(p))
def test_calmar_ratio():
cagr = df.calc_cagr()
mdd = df.calc_max_drawdown()
a = df.calc_calmar_ratio()
assert np.allclose(a, cagr / abs(mdd))
def test_calc_stats():
# test twelve_month_win_perc divide by zero
prices = df.C['2010-10-01':'2011-08-01']
stats = ffn.calc_stats(prices).stats
assert pd.isnull(stats['twelve_month_win_perc'])
prices = df.C['2009-10-01':'2011-08-01']
stats = ffn.calc_stats(prices).stats
assert not pd.isnull(stats['twelve_month_win_perc'])
# test yearly_sharpe divide by zero
prices = df.C['2009-01-01':'2012-01-01']
stats = ffn.calc_stats(prices).stats
assert 'yearly_sharpe' in stats.index
prices[prices > 0.0] = 1.0
# throws warnings
stats = ffn.calc_stats(prices).stats
    assert pd.isnull(stats['yearly_sharpe'])
# -*- coding: utf-8 -*-
"""DiamondRegression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1NPXMsi1hxVlY2f0dRGNVwuoMmMSaeFWP
"""
import pandas as pd
df = pd.read_csv("diamonds.csv")
df.head()
cuts = {'Ideal': 0,'Premium': 1, 'Very Good': 2, "Good": 3, "Fair":4}
colors = dict(zip('DEFGHIJ',range(7)))
clarity = {'SI2':5, 'SI1':4, 'VS1':2, 'VS2':3, 'VVS2':1, 'VVS1':0, 'I1':6, 'IF':7}
df['cut_n'] = df['cut'].apply(lambda x: cuts[x])
df['color_n'] = df['color'].apply(lambda x: colors[x])
df['clarity_n'] = df['clarity'].apply(lambda x: clarity[x])
X = df[["depth","table", 'carat','cut_n','color_n','clarity_n']]
Y = df['price']
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
coef = reg.coef_
intercept = reg.intercept_
print(coef)
print(intercept)
pred = pd.DataFrame(reg.predict(X_test))
Y_test_df = pd.DataFrame(Y_test)
Y_test_df.reset_index(drop = True, inplace= True)
comparison = pd.concat([Y_test_df,pred], axis = 1)
comparison
pred2 = reg.predict(X_test)
err = pd.Series(Y_test) - [p[0]for p in pred2]
err.hist(bins=100)
# errors are normally distributed and symmetrical
err.describe()
import statistics as stats
def rmse(errors):
return(pow(stats.mean([pow(e,2) for e in errors]),0.5))
rmse(err)
# Let's repeat but with only the four C's
X = df[['carat','cut_n','color_n','clarity_n']]
Y = df['price']
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
coef = reg.coef_
intercept = reg.intercept_
print(coef)
print(intercept)
pred = pd.DataFrame(reg.predict(X_test))
Y_test_df = pd.DataFrame(Y_test)
Y_test_df.reset_index(drop = True, inplace= True)
comparison = pd.concat([Y_test_df,pred], axis = 1)
comparison
pred2 = reg.predict(X_test)
err = pd.Series(Y_test) - [p[0]for p in pred2]
err.hist(bins=100)
rmse(err)
#now with get dummies?
colors = pd.get_dummies(df.color, prefix="Color", dtype="int")
cuts = pd.get_dummies(df.cut, prefix = "Cut", dtype="int")
clarities = pd.get_dummies(df.clarity, prefix = "Clarity", dtype="int")
main = colors.merge(cuts, how = "inner", left_index=True,right_index=True)
main = main.merge(clarities, how = "inner", left_index=True,right_index=True)
df_dummies = df.drop(columns= ["cut", "color", "clarity", "depth", "table", "x" ,"y" ,"z", "cut_n", "color_n", "clarity_n"])
df_dummies = df_dummies.merge(main, how = "inner", left_index = True, right_index = True)
X = df_dummies.drop(columns = ["Unnamed: 0", "price"])
Y = df_dummies['price']
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
import time
import config
import os
import mysql.connector
import pandas as pd
from WindPy import w
from importlib import resources
from helper.mysql_dbconnection import mysql_dbconnection
from helper.upload_github import upload_github
with resources.path('helper', 'mysql.cfg') as p:
resource_path = str(p)
if os.path.isfile('mysql.cfg'):
cfg = config.Config('mysql.cfg')
else:
cfg = config.Config(resource_path)
class Loader:
def __init__(self, start_date, end_date, database, table_name, field, options, sector=None):
db_engine = mysql_dbconnection(database=database)
self._start_date = start_date
self._end_date = end_date
self._db_engine = db_engine
self._table_name = table_name
self._sector = sector
self._field = field
self._options = options
self._cfg = cfg
@property
def current_time(self):
return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(time.time()))
def __error_logger(self, wind_code, status, info=None):
"""
        Log the errors occurring when retrieving or saving data
:param wind_code: str, wind code of the present security
:param status: status parameters, e.g. the ErrorCode returned by Wind API
:return: None
"""
        error_log = pd.DataFrame(index=[wind_code])
import zmq
from datetime import datetime
import threading
from posttroll.message import Message
import os
import os.path
import pandas as pd
SDR_PUBLISHER = "tcp://viirscollector:29092"
PICKLE_DIR = "/viirs/pickle"
SDR_PICKLE = os.path.join(PICKLE_DIR, "sdr2.pickle")
class SdrSubscriber(threading.Thread):
def __init__(self, context):
threading.Thread.__init__(self)
self.socket = context.socket(zmq.SUB)
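        # TCP keepalive options so the long-lived SUB connection to the SDR
        # collector survives idle periods (e.g. NAT/firewall timeouts)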
self.socket.setsockopt(zmq.TCP_KEEPALIVE, 1)
self.socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 60)
self.socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 20)
self.socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 60)
self.socket.setsockopt_string(zmq.SUBSCRIBE, "pytroll://AVO/viirs/sdr")
self.socket.connect(SDR_PUBLISHER)
self.lock = threading.Lock()
self.initalize()
def initalize(self):
if os.path.exists(SDR_PICKLE):
print("loading {}".format(SDR_PICKLE))
            self._sdrs = pd.read_pickle(SDR_PICKLE)
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern is provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise Error if names_pattern is a list/tuple and
.value in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
assert_frame_equal(result, actual)
def test_pivot_sort_by_appearance(df_checks):
"""Test output if sort_by_appearance is True."""
result = df_checks.pivot_longer(
column_names="ht*",
names_to="dim",
values_to="num",
sort_by_appearance=True,
)
actual = (
df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
.sort_index()
.reset_index(drop=True)
)
assert_frame_equal(result, actual)
def test_names_pat_str(df_checks):
"""
Test output when names_pattern is a string,
and .value is present.
"""
result = (
df_checks.pivot_longer(
column_names="ht*",
names_to=(".value", "age"),
names_pattern="(.+)(.)",
sort_by_appearance=True,
)
.reindex(columns=["famid", "birth", "age", "ht"])
.astype({"age": int})
)
actual = pd.wide_to_long(
df_checks, stubnames="ht", i=["famid", "birth"], j="age"
).reset_index()
assert_frame_equal(result, actual)
def test_multiindex_column_level(df_multi):
"""
Test output from MultiIndex column,
when column_level is provided.
"""
result = df_multi.pivot_longer(
index="name", column_names="names", column_level=0
)
expected_output = df_multi.melt(
id_vars="name", value_vars="names", col_level=0
)
assert_frame_equal(result, expected_output)
def test_multiindex(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
and there is no names_sep/names_pattern.
"""
result = df_multi.pivot_longer(index=[("name", "a")])
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to(df_multi):
"""
Test output from MultiIndex column,
where column_level is not provided,
there is no names_sep/names_pattern,
and names_to is provided as a sequence.
"""
result = df_multi.pivot_longer(
index=[("name", "a")], names_to=["variable_0", "variable_1"]
)
expected_output = df_multi.melt(id_vars=[("name", "a")])
assert_frame_equal(result, expected_output)
def test_multiindex_names_to_length_mismatch(df_multi):
"""
Raise error if the length of names_to does not
match the number of column levels.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_to=["variable_0", "variable_1", "variable_2"],
)
def test_multiindex_incomplete_level_names(df_multi):
"""
Raise error if not all the levels have names.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_multiindex_index_level_names_intersection(df_multi):
"""
Raise error if level names exist in index.
"""
with pytest.raises(ValueError):
df_multi.columns.names = [None, "a"]
df_multi.pivot_longer(index=[("name", "a")])
def test_no_column_names(df_checks):
"""
Test output if all the columns
are assigned to the index parameter.
"""
assert_frame_equal(
df_checks.pivot_longer(df_checks.columns).rename_axis(columns=None),
df_checks,
)
@pytest.fixture
def test_df():
"""Fixture DataFrame"""
return pd.DataFrame(
{
"off_loc": ["A", "B", "C", "D", "E", "F"],
"pt_loc": ["G", "H", "I", "J", "K", "L"],
"pt_lat": [
100.07548220000001,
75.191326,
122.65134479999999,
124.13553329999999,
124.13553329999999,
124.01028909999998,
],
"off_lat": [
121.271083,
75.93845266,
135.043791,
134.51128400000002,
134.484374,
137.962195,
],
"pt_long": [
4.472089953,
-144.387785,
-40.45611048,
-46.07156181,
-46.07156181,
-46.01594293,
],
"off_long": [
-7.188632000000001,
-143.2288569,
21.242563,
40.937416999999996,
40.78472,
22.905889000000002,
],
}
)
def test_names_pattern_str(test_df):
"""Test output for names_pattern and .value."""
result = test_df.pivot_longer(
column_names="*_*",
names_to=["set", ".value"],
names_pattern="(.+)_(.+)",
sort_by_appearance=True,
)
actual = test_df.copy()
actual.columns = actual.columns.str.split("_").str[::-1].str.join("_")
actual = (
pd.wide_to_long(
actual.reset_index(),
stubnames=["loc", "lat", "long"],
sep="_",
i="index",
j="set",
suffix=r".+",
)
.reset_index("set")
.reset_index(drop=True)
)
| assert_frame_equal(result, actual) | pandas.testing.assert_frame_equal |
import numpy as np
import pandas as pd
class Tracker2:
def __init__(self, means, T, store_rewards_arm=False):
"""
:param means: means for the different arms.
:param T: horizon.
:param store_rewards_arm: storing the rewards for the different arms.
"""
self.means = means.reshape(1, len(means))
self.nb_arms = means.shape[0]
self.T = T
self.Sa = np.zeros(self.nb_arms)
self.Na = np.zeros(self.nb_arms)
self.reward = np.zeros(self.T)
self.arm_sequence = np.empty(self.T, dtype=int)
self.t = 0
self.store_rewards_arm = store_rewards_arm
if store_rewards_arm:
self.rewards_arm = [[] for _ in range(self.nb_arms)]
def reset(self):
"""
Reset the quantities of interest used by all methods:
- Sa: np.array, cumulative reward for the different arms
- Na: np.array, number of times the different arms have been pulled
- reward: np.array, rewards collected at each round
- arm_sequence: np.array, arm chosen at each round
"""
self.Sa = np.zeros(self.nb_arms)
self.Na = np.zeros(self.nb_arms)
self.reward = np.zeros(self.T)
self.arm_sequence = np.zeros(self.T, dtype=int)
self.rewards_arm = [[] for _ in range(self.nb_arms)]  # independent lists per arm (avoids aliasing)
if self.store_rewards_arm:
self.rewards_arm = [[] for _ in range(self.nb_arms)]
def update(self, t, arm, reward):
"""
Update all the parameters of interest after choosing a given arm
:param t: int, current time/round
:param arm: int, arm chosen at this round
:param Sa: np.array, cumulative reward array up to time t-1
:param Na: np.array, number of times arm has been pulled up to time t-1
:param reward: np.array, rewards obtained with the policy up to time t-1
:param arm_sequence: np.array, arm chosen at each step up to time t-1
"""
self.Na[arm] += 1
self.arm_sequence[t] = arm
self.reward[t] = reward
self.Sa[arm] += reward
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
def regret(self):
"""
Compute the cumulative regret of a single trajectory.
Should be called after playing T steps.
:return: np.array, cumulative regret for a single experiment
"""
return self.means.max() * np.arange(1, self.T + 1) - np.cumsum(np.asarray(self.means).ravel()[self.arm_sequence])
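# Hedged usage sketch (not part of the original module): how a Tracker2 instance is
# typically driven by a bandit loop. The Bernoulli reward model and the uniformly
# random policy below are assumptions chosen only to exercise update() and regret().
def _demo_tracker2(T=1000, seed=0):
    rng = np.random.default_rng(seed)
    means = np.array([0.3, 0.5, 0.7])
    tracker = Tracker2(means, T, store_rewards_arm=True)
    for t in range(T):
        arm = int(rng.integers(tracker.nb_arms))      # placeholder policy: uniform choice
        reward = float(rng.binomial(1, means[arm]))   # Bernoulli reward with the arm's mean
        tracker.update(t, arm, reward)
    return tracker.regret()                           # cumulative regret over the horizon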
class TrackerEXP3(Tracker2):
def __init__(self, means, T, gamma, alpha, store_rewards_arm=False):
super(TrackerEXP3, self).__init__(means, T, store_rewards_arm)
self.w = np.ones(self.nb_arms)
self.p = np.ones(self.nb_arms)*1/self.nb_arms
self.gamma = gamma
self.alpha = alpha
self.time_changes = [0]
def update(self, t, arm, reward):
sum_weights = self.w.sum()
self.p = (1-self.gamma)/sum_weights*self.w + self.gamma/self.nb_arms
xi_hat = np.zeros(self.nb_arms)
xi_hat[arm] = self.gamma * reward/(self.p[arm]*self.nb_arms)
self.w = self.w * np.exp(xi_hat) + np.exp(1)*self.alpha/self.nb_arms * sum_weights
self.reward[t] = reward
self.arm_sequence[t] = arm
self.t = t
def regret(self):
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
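# Hedged sketch (assumption): the EXP3 tracker above only maintains the probability
# vector self.p; a policy built on it would typically draw the next arm like this.
def _exp3_choose_arm(tracker, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    p = tracker.p / tracker.p.sum()        # defensive renormalisation against floating-point drift
    return int(rng.choice(tracker.nb_arms, p=p))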
class TrackerREXP3(Tracker2):
def __init__(self, means, T, gamma, store_rewards_arm=True):
super(TrackerREXP3, self).__init__(means, T, store_rewards_arm)
self.w = np.ones(self.nb_arms)
self.p = np.ones(self.nb_arms)*1/self.nb_arms
self.gamma = gamma
self.time_changes = [0]
def update(self, t, arm, reward):
sum_weights = self.w.sum()
self.p = (1-self.gamma)/sum_weights*self.w + self.gamma/self.nb_arms
xi_hat = np.zeros(self.nb_arms)
xi_hat[arm] = self.gamma * reward/(self.p[arm]*self.nb_arms)
self.w = self.w * np.exp(xi_hat)
self.reward[t] = reward
self.arm_sequence[t] = arm
self.t = t
def restartREXP3(self):
self.w = np.ones(self.nb_arms)
def regret(self):
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
def regret_old(self):
"""
Compute the regret of a single experiment.
:return: np.array, cumulative regret for a single experiment
"""
means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))
return np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])
class TrackerDTS(Tracker2):
def __init__(self, means, T, gamma, store_rewards_arm=False):
super(TrackerDTS, self).__init__(means, T, store_rewards_arm)
self.gamma = gamma
self.time_changes = [0]
self.S = np.zeros(self.nb_arms)
self.F = np.zeros(self.nb_arms)
self.nb_draws = np.zeros(self.nb_arms)
self.t = 0
def update(self, t, arm, reward):
self.S = self.gamma*self.S
self.F = self.gamma*self.F
self.nb_draws = self.gamma*self.nb_draws
self.S[arm] += reward
self.reward[t] = reward
self.F[arm] += 1-reward
self.nb_draws[arm] += 1
self.arm_sequence[t] = arm
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
def regret(self):
"""
:return: np.array, cumulative regret for a single experiment
"""
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
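# Hedged sketch (assumption): TrackerDTS keeps discounted success/failure counts S and F;
# a discounted Thompson-sampling policy would usually draw one Beta sample per arm from
# them and play the argmax, roughly as below.
def _dts_choose_arm(tracker, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    samples = rng.beta(tracker.S + 1.0, tracker.F + 1.0)   # Beta(S+1, F+1) posterior draws
    return int(np.argmax(samples))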
class TrackerSWTS(Tracker2):
def __init__(self, means, T, tau, store_rewards_arm=False):
super(TrackerSWTS, self).__init__(means, T, store_rewards_arm)
self.tau = tau
self.time_changes = [0]
self.S = np.zeros(self.nb_arms)
self.F = np.zeros(self.nb_arms)
self.nb_draws = np.zeros(self.nb_arms)
self.t = 0
def update(self, t, arm, reward):
self.S[arm] += reward
self.F[arm] += 1-reward
self.nb_draws[arm] += 1
self.arm_sequence[t] = arm
self.t = t
self.reward[t] = reward
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
if t >= self.tau:
first_t = int(t - self.tau)
first_element = int(self.arm_sequence[first_t])
self.S[first_element] -= self.reward[first_t]
self.F[first_element] -= (1-self.reward[first_t])
self.nb_draws[first_element] -= 1
def regret(self):
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
class TrackerCUSUM(Tracker2):
def __init__(self, means, T, M, eps, h, store_rewards_arm=False):
super(TrackerCUSUM, self).__init__(means, T, store_rewards_arm)
self.time_changes = [0]
self.count = M * np.ones(self.nb_arms)
self.M = M
self.M_mean = np.zeros(self.nb_arms)
self.t = 0
self.g_minus = np.zeros(self.nb_arms)
self.g_pos = np.zeros(self.nb_arms)
self.s_minus = np.zeros(self.nb_arms)
self.s_pos = np.zeros(self.nb_arms)
self.eps = eps
self.h = h
def update(self, t, arm, reward):
if self.count[arm] == 1:
self.count[arm] -= 1
self.M_mean[arm] += reward
self.M_mean[arm] = self.M_mean[arm]/self.M
elif self.count[arm] >= 0:
self.count[arm] -= 1
if self.count[arm] > 0:
self.M_mean[arm] += reward
self.Na[arm] += 1
self.arm_sequence[t] = arm
self.reward[t] = reward
self.Sa[arm] += reward
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
def reset_CD(self, arm):
self.Sa[arm] = 0
self.Na[arm] = 0
self.count[arm] = self.M
self.M_mean[arm] = 0
self.g_minus[arm] = 0
self.g_pos[arm] = 0
self.s_minus[arm] = 0
self.s_pos[arm] = 0
def CUSUM(self, arm, reward):
if self.count[arm] > -1:
self.s_pos[arm] = 0
self.s_minus[arm] = 0
else:
self.s_pos[arm] = reward - self.M_mean[arm] - self.eps
self.s_minus[arm] = self.M_mean[arm] - reward - self.eps
self.g_pos[arm] = max(0, self.g_pos[arm] + self.s_pos[arm])
self.g_minus[arm] = max(0, self.g_minus[arm] + self.s_minus[arm])
if max(self.g_pos[arm], self.g_minus[arm]) >= self.h:
self.reset_CD(arm)
return True
return False
def regret(self):
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
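# Hedged usage sketch (not from the original module): how the CUSUM detector above is
# typically wired into the bandit loop -- feed every observed reward to CUSUM(); when it
# returns True the per-arm statistics have already been cleared by reset_CD(). The
# round-robin policy and the abrupt change injected on arm 0 are assumptions.
def _demo_cusum(T=500, M=50, eps=0.05, h=10.0, seed=0):
    rng = np.random.default_rng(seed)
    means = np.array([0.2, 0.8])
    tracker = TrackerCUSUM(means, T, M=M, eps=eps, h=h)
    change_points = []
    for t in range(T):
        arm = t % tracker.nb_arms                               # placeholder round-robin policy
        p = 0.9 if (arm == 0 and t > T // 2) else means[arm]    # arm 0 changes mid-horizon
        reward = float(rng.binomial(1, p))
        tracker.update(t, arm, reward)
        if tracker.CUSUM(arm, reward):                          # drift detected (stats reset)
            change_points.append(t)
    return change_points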
class TrackerMUCB(Tracker2):
def __init__(self, means, T, store_rewards_arm=False):
super(TrackerMUCB, self).__init__(means, T, store_rewards_arm)
self.time_changes = [0]
self.t = 0
def update(self, t, arm, reward):
self.Na[arm] += 1
self.arm_sequence[t] = arm
self.reward[t] = reward
self.Sa[arm] += reward
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
def reset_CD(self):
self.Sa = np.zeros(self.nb_arms)
self.Na = np.zeros(self.nb_arms)
def CD(self, arm, w, b):
m = self.rewards_arm[arm][-int(w):]
n = len(m)//2
m1 = m[:n]
m2 = m[n:]
if np.abs(sum(m2) - sum(m1)) > b:
return True
else:
return False
def regret(self):
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n-(i+1) > 0 and t >= self.time_changes[i+1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
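# Hedged sketch (assumption): the detector above compares the two halves of the last w
# rewards of an arm, so it needs store_rewards_arm=True and a threshold b. The b formula
# below (sqrt(w/2 * log(2*K*T^2)), as in the M-UCB literature) is an assumed choice, not
# a value taken from the original experiments.
def _mucb_check(tracker, arm, w, b=None):
    if b is None:
        b = np.sqrt(w / 2.0 * np.log(2.0 * tracker.nb_arms * tracker.T ** 2))
    if len(tracker.rewards_arm[arm]) < w:
        return False                       # not enough samples in the window yet
    if tracker.CD(arm, w, b):
        tracker.reset_CD()                 # change detected: restart the statistics
        return True
    return False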
class SWTracker(Tracker2):
def __init__(self, means, T, tau, store_rewards_arm=False):
super(SWTracker, self).__init__(means, T, store_rewards_arm)
self.tau = tau
self.time_changes = [0]
def update(self, t, arm, reward):
"""
Update all the parameters of interest after choosing an arm
:param t: int, current time/round
:param arm: int, arm chosen at this round
:param Sa: np.array, cumulative reward over the last tau rounds
:param Na: np.array, number of pulls of each arm over the last tau rounds
:param reward: np.array, rewards obtained with the policy up to time t-1
:param arm_sequence: np.array, arm chosen at each step up to time t-1
"""
self.Na[arm] += 1
self.arm_sequence[t] = arm
self.reward[t] = reward
self.Sa[arm] += reward
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
if t >= self.tau:
first_t = t - self.tau
first_element = int(self.arm_sequence[first_t])
self.Na[first_element] -= 1
self.Sa[first_element] -= self.reward[first_t]
def regret_old(self):
"""
Compute the regret of a single experiment.
:return: np.array, cumulative regret for a single experiment
"""
means = np.array(pd.DataFrame(self.means, self.time_changes).reindex(np.arange(self.T)).fillna(method='ffill'))
return np.cumsum([means[t].max() - means[t, self.arm_sequence[t]] for t in range(self.T)])
def regret(self):
"""
Compute the regret when a sliding-window policy is used.
:return: np.array, cumulative regret for a single experiment
"""
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n - (i + 1) > 0:
if t >= self.time_changes[i + 1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
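# Hedged usage sketch (not from the original module): the sliding-window tracker keeps
# Sa/Na restricted to the last tau rounds, so a window mean is simply Sa/Na. The random
# policy and Bernoulli rewards below are placeholders used only to exercise the bookkeeping.
def _demo_sliding_window(T=300, tau=50, seed=0):
    rng = np.random.default_rng(seed)
    means = np.array([0.4, 0.6])
    tracker = SWTracker(means, T, tau=tau)
    for t in range(T):
        arm = int(rng.integers(tracker.nb_arms))
        reward = float(rng.binomial(1, means[arm]))
        tracker.update(t, arm, reward)
    assert tracker.Na.sum() <= tau                     # only the last tau pulls remain counted
    window_means = np.divide(tracker.Sa, tracker.Na,
                             out=np.zeros_like(tracker.Sa), where=tracker.Na > 0)
    return window_means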
class DiscountTracker(Tracker2):
def __init__(self, means, T, gamma, store_rewards_arm=False):
super(DiscountTracker, self).__init__(means, T, store_rewards_arm)
self.gamma = gamma
self.time_changes = [0]
def update(self, t, arm, reward):
"""
Update all the parameters of interest after choosing an arm
:param t: int, current time/round
:param arm: int, arm chosen at this round
:param Sa: np.array, discounted cumulative reward for each arm
:param Na: np.array, discounted number of pulls of each arm
:param reward: np.array, rewards obtained with the policy up to time t-1
:param arm_sequence: np.array, arm chosen at each step up to time t-1
"""
# Important modification all the arms are discounted with this tracker but the update rule is different
# for the arm selected.
self.Na = self.gamma * self.Na
self.Na[arm] += 1
self.arm_sequence[t] = arm
self.reward[t] = reward
# Updating the Sa for all arms but different update for the arm selected
self.Sa = self.gamma*self.Sa
self.Sa[arm] += reward
self.t = t
if self.store_rewards_arm:
self.rewards_arm[arm].append(reward)
def regret(self):
"""
Compute the regret when a discount policy is used.
:return: np.array, cumulative regret for a single experiment
"""
res = np.zeros(self.T)
n = len(self.time_changes)
i = 0
max_ = self.means[i].max()
for t in range(self.T):
if n - (i + 1) > 0:
if t >= self.time_changes[i + 1]:
i += 1
max_ = self.means[i].max()
res[t] = max_ - self.means[i][self.arm_sequence[t]]
return np.cumsum(res)
def regret_old(self):
"""
Compute the regret of a single experiment.
:return: np.array, cumulative regret for a single experiment
"""
means = np.array( | pd.DataFrame(self.means, self.time_changes) | pandas.DataFrame |
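# Hedged sketch (assumption): with DiscountTracker the discounted empirical mean of an
# arm is Sa/Na (both already geometrically discounted in update()); a discounted-UCB
# style index would combine it with a padding term roughly as below. The constant xi and
# the log(Na.sum()) padding follow the usual discounted-UCB shape and are illustrative,
# not values taken from the original experiments.
import numpy as np

def _discounted_ucb_index(tracker, xi=0.6):
    n_total = max(tracker.Na.sum(), 1.0)
    means_hat = np.divide(tracker.Sa, tracker.Na,
                          out=np.zeros_like(tracker.Sa), where=tracker.Na > 0)
    padding = np.sqrt(xi * np.log(n_total) / np.maximum(tracker.Na, 1e-12))
    return means_hat + padding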