| prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
|---|---|---|
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, pd.np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6'])
res = util.to_contracts(notional, prices, multipliers,
rounder=pd.np.floor)
res_exp = pd.Series([0, 2], index=['CLZ6', 'COZ6'])
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier():
notionals = pd.Series([-30.20, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_to_contract_different_fx_with_multiplier_rounding():
# won't work out to integer number of contracts so this tests rounding
notionals = pd.Series([-30.21, 2 * 30.5 / 1.32 * 10, 10.2 * 0.8 * 100],
index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
multipliers = pd.Series([1, 10, 100], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_contracts(notionals, prices, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates,
multipliers=multipliers)
assert_series_equal(res, res_exp)
def test_trade_with_zero_amount():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, 0], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) + 0 * 0.5 / (50.41*100) - 1,
# 0 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 19], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_trade_all_zero_amount_return_empty():
wts = pd.DataFrame([1], index=["CLX16"], columns=[0])
desired_holdings = pd.Series([13], index=[0])
current_contracts = 0
prices = pd.Series([50.32], index=['CLX16'])
multiplier = pd.Series([100], index=['CLX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
exp_trades = pd.Series(dtype="int64")
assert_series_equal(trades, exp_trades)
def test_trade_one_asset():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1, 0],
index=['CLX16', 'CLZ16', 'CLF17'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_multi_asset():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=["CL0", "CL1"])
wts2 = pd.DataFrame([1], index=["COX16"], columns=["CO0"])
wts = {"CL": wts1, "CO": wts2}
desired_holdings = pd.Series([200000, -50000, 100000],
index=["CL0", "CL1", "CO0"])
current_contracts = pd.Series([0, 1, 0, 5],
index=['CLX16', 'CLZ16', 'CLF17',
'COX16'])
prices = pd.Series([50.32, 50.41, 50.48, 49.50],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
multiplier = pd.Series([100, 100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17', 'COX16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
# 100000 * 1 / (49.50*100) - 5,
exp_trades = pd.Series([20, 14, -5, 15], index=['CLX16', 'CLZ16',
'CLF17', 'COX16'])
exp_trades = exp_trades.sort_index()
assert_series_equal(trades, exp_trades)
def test_trade_extra_desired_holdings_without_weights():
wts = pd.DataFrame([0], index=["CLX16"], columns=["CL0"])
desired_holdings = pd.Series([200000, 10000], index=["CL0", "CL1"])
current_contracts = pd.Series([0], index=['CLX16'])
prices = pd.Series([50.32], index=['CLX16'])
multipliers = pd.Series([1], index=['CLX16'])
with pytest.raises(ValueError):
util.calc_trades(current_contracts, desired_holdings, wts, prices,
multipliers)
def test_trade_extra_desired_holdings_without_current_contracts():
# this should treat the missing holdings as 0, since this would often
# happen when adding new positions without any current holdings
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000, -50000], index=[0, 1])
current_contracts = pd.Series([0, 1],
index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41, 50.48],
index=['CLX16', 'CLZ16', 'CLF17'])
multiplier = pd.Series([100, 100, 100],
index=['CLX16', 'CLZ16', 'CLF17'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 50000 * 0.5 / (50.41*100) - 1,
# -50000 * 0.5 / (50.48*100) - 0,
exp_trades = pd.Series([20, 14, -5], index=['CLX16', 'CLZ16', 'CLF17'])
exp_trades = exp_trades.sort_index()
# non existent contract holdings result in fill value being a float,
# which casts to float64
assert_series_equal(trades, exp_trades, check_dtype=False)
def test_trade_extra_weights():
# extra weights should be ignored
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
desired_holdings = pd.Series([200000], index=[0])
current_contracts = pd.Series([0, 2], index=['CLX16', 'CLZ16'])
prices = pd.Series([50.32, 50.41], index=['CLX16', 'CLZ16'])
multiplier = pd.Series([100, 100], index=['CLX16', 'CLZ16'])
trades = util.calc_trades(current_contracts, desired_holdings, wts,
prices, multipliers=multiplier)
# 200000 * 0.5 / (50.32*100) - 0,
# 200000 * 0.5 / (50.41*100) - 2,
exp_trades = pd.Series([20, 18], index=['CLX16', 'CLZ16'])
assert_series_equal(trades, exp_trades)
def test_get_multiplier_dataframe_weights():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000], index=["CL"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dict_weights():
wts1 = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
wts2 = pd.DataFrame([0.5, 0.5], index=["COX16", "COZ16"], columns=[0])
wts = {"CL": wts1, "CO": wts2}
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
imults = util.get_multiplier(wts, ast_mult)
imults_exp = pd.Series([1000, 1000, 1000, 1000, 1000],
index=["CLF17", "CLX16", "CLZ16", "COX16",
"COZ16"])
assert_series_equal(imults, imults_exp)
def test_get_multiplier_dataframe_weights_multiplier_asts_error():
wts = pd.DataFrame([[0.5, 0], [0.5, 0.5], [0, 0.5]],
index=["CLX16", "CLZ16", "CLF17"],
columns=[0, 1])
ast_mult = pd.Series([1000, 1000], index=["CL", "CO"])
with pytest.raises(ValueError):
util.get_multiplier(wts, ast_mult)
def test_weighted_expiration_two_generics():
vals = [[1, 0, 1/2, 1/2, 0, 1, 0], [0, 1, 0, 1/2, 1/2, 0, 1]]
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF15'),
(TS('2015-01-03'), 'CLG15'),
( | TS('2015-01-04') | pandas.Timestamp |
# importing the necessary libraries
import numpy as np
import pandas as pd
# defining the function to read the box boundary
def dimension(file):
f = open(file,'r')
content = f.readlines()
    # storing each vertex point in the data list
data = []
v_info = []
vertices_data =[]
# cartesian_data =[]
# vt_p = []
for x in range(len(content)):
        # checking whether the line contains Cartesian points or not
if "CARTESIAN_POINT" in content[x]:
d=content[x].replace(",","").split(" ")
# Storing the cartesian point (X,Y,Z)
cartesian_data=d[0],d[7],d[8],d[9]
data.append(cartesian_data)
# checking for the unit used in step file.
elif "LENGTH_UNIT" in content[x]:
d=content[x].replace(",","").split(" ")
length_unit = (d[11] +" "+ d[12]).replace(".","").title()
elif "VERTEX_POINT " in content[x]:
dt=content[x].replace(",","").split(" ")
vt_p=dt[0],dt[5]
v_info.append(vt_p)
else:
pass
df = | pd.DataFrame (data, columns = ['Line_no','x','y','z']) | pandas.DataFrame |
import datetime
import pandas as pd
from src.models.model import *
from hyperopt import Trials, STATUS_OK, tpe, fmin, hp
from hyperas.utils import eval_hyperopt_space
from keras.optimizers import SGD
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score, recall_score, precision_score, f1_score
from src.helpers.preprocess_helpers import Standardizer
from src.helpers.import_helpers import LoadDataset, SplitData
X, y = LoadDataset('final_binned_global.csv', directory='data//processed//')
# Split data
X_train, y_train, X_test, y_test = SplitData(X,y, test_size=0.1)
# Standardize training data
X_train = Standardizer().standardize(X_train, na_values=False)
X_test = Standardizer().standardize(X_test, na_values=False)
# Save the split train and test dataset before optimization for future reference
y_train_save = | pd.DataFrame(y_train, columns=['LABEL']) | pandas.DataFrame |
import pandas as pd
from collections import Counter
def df_to_experiment_annotator_table(df, experiment_col, annotator_col, class_col):
"""
:param df: A Dataframe we wish to transform with that contains the response of an annotator to an experiment
| | document_id | annotator_id | annotation |
|---:|--------------:|:---------------|-------------:|
| 0 | 1 | A | 1 |
| 1 | 1 | B | 1 |
| 2 | 1 | D | 1 |
| 4 | 2 | A | 2 |
| 5 | 2 | B | 2 |
:param experiment_col: The column name that contains the experiment (unit)
:param annotator_col: The column name that identifies an annotator
:param class_col: The column name that identifies the annotators response (class)
:return: A dataframe indexed by annotators, with experiments as columns and the responses in the cells
| annotator_id | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|:---------------|----:|----:|----:|----:|----:|----:|----:|----:|----:|-----:|-----:|-----:|
| A | 1 | 2 | 3 | 3 | 2 | 1 | 4 | 1 | 2 | nan | nan | nan |
| B | 1 | 2 | 3 | 3 | 2 | 2 | 4 | 1 | 2 | 5 | nan | 3 |
| C | nan | 3 | 3 | 3 | 2 | 3 | 4 | 2 | 2 | 5 | 1 | nan |
| D | 1 | 2 | 3 | 3 | 2 | 4 | 4 | 1 | 2 | 5 | 1 | nan |
"""
return df.pivot_table(
index=annotator_col, columns=experiment_col, values=class_col, aggfunc="first"
)
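# A minimal usage sketch of the helper above (the tiny frame and its column
# names mirror the docstring example and are illustrative only):
def _demo_experiment_annotator_table():
    df = pd.DataFrame(
        {
            "document_id": [1, 1, 1, 2, 2],
            "annotator_id": ["A", "B", "D", "A", "B"],
            "annotation": [1, 1, 1, 2, 2],
        }
    )
    # rows become annotators, columns become experiments, cells hold responses
    return df_to_experiment_annotator_table(
        df, experiment_col="document_id", annotator_col="annotator_id", class_col="annotation"
    )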
def make_value_by_unit_table_dict(experiment_annotator_df):
"""
:param experiment_annotator_df: A dataframe that came out of df_to_experiment_annotator_table
    :return: A dictionary of dictionaries (i.e. a table) whose rows (first level) are experiments and columns are responses
{1: Counter({1.0: 1}),
2: Counter(),
3: Counter({2.0: 2}),
4: Counter({1.0: 2}),
5: Counter({3.0: 2}),
"""
data_by_exp = experiment_annotator_df.T.sort_index(axis=1).sort_index()
table_dict = {}
for exp, row in data_by_exp.iterrows():
vals = row.dropna().values
table_dict[exp] = Counter()
for val in vals:
table_dict[exp][val] += 1
return table_dict
def calculate_frequency_dicts(vbu_table_dict):
"""
:param vbu_table_dict: A value by unit table dictionary, the output of make_value_by_unit_table_dict
    :return: A dictionary of dictionaries
{
unit_freqs:{ 1:2..},
class_freqs:{ 3:4..},
total:7
}
"""
vbu_df = (
| pd.DataFrame.from_dict(vbu_table_dict, orient="index") | pandas.DataFrame.from_dict |
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# this module has basic operations
data = pd.read_csv('pandas_help/winter.csv')
dataset = pd.read_csv('pandas_help/wine_data.csv', sep=';')
# if you have NA values you can specify them in pd.read_csv()
# suppose missing values in column A are coded as -1 and in column B as -2,
# in that case we can use the code below:
#
# df = pd.read_csv(<file>, na_values={'A': [-1], 'B': [-2]})
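# A small illustrative sketch of per-column na_values; the column names and
# sentinel codes below are made up, and io.StringIO stands in for a real file:
import io
_raw = "A,B\n1,5\n-1,7\n3,-2\n"
df_na = pd.read_csv(io.StringIO(_raw), na_values={'A': [-1], 'B': [-2]})
# df_na now has NaN where A was -1 and where B was -2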
# %%
# to get the shape
print(data.shape) # show (rows, columns)
# %%
# to get data type of columns
print(data.dtypes)
# %%
# to get info
print(data.info())
# %%
# to get all the values as a list of lists, ignoring the index
print(data.values)
# %%
# to get columns
print(data.columns)
# df.columns = df.columns.str.strip() to remove spaces
# if you want to drop columns use df.drop([<column name list>], axis='columns')
# selecting columns with all non-zero values: df2.loc[:, df2.all()] --> missing values will be taken as non-zero
# selecting columns with any NaN: df.loc[:, df.isnull().any()]
# dropping rows with any NaN: df.dropna(how='any'); how='all' will remove a row in which all columns are null; you can provide
# thresh=<int> to keep only rows/columns that have at least that many non-NA values
# if you want to drop a row based on if particular column is na then use
# df.dropna(subset=[<column name>], inplace=True)
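# a quick illustrative sketch of the drop/dropna variants above (df_demo is made up):
df_demo = pd.DataFrame({'A': [1, np.nan, 3], 'B': [np.nan, np.nan, 6], 'C': [7, 8, 9]})
df_demo.drop(['C'], axis='columns')  # drop a column
df_demo.dropna(how='any')            # drop rows containing any NaN
df_demo.dropna(how='all')            # drop rows where every column is NaN
df_demo.dropna(thresh=2)             # keep rows with at least 2 non-NA values
df_demo.dropna(subset=['A'])         # drop rows where column A is NaN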
# %%
# if you want to concatenate two columns then you can use:
# data.A.str.cat(data.B, sep=' ') you can specify sep as you like
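# illustrative sketch of str.cat (the tiny frame below is made up):
df_cat = pd.DataFrame({'A': ['foo', 'bar'], 'B': ['x', 'y']})
df_cat['AB'] = df_cat.A.str.cat(df_cat.B, sep=' ')  # 'foo x', 'bar y'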
# %%
# if you want to get total null values in each column you can use data.isnull().sum()
# %%
# to get index
print(data.index)
# %%
# to reset index
data = data.reset_index()
# %%
# to get the top 5 rows; you can get the top n rows using data.head(<num of rows needed>).
print(data.head())
# %%
# to get the bottom 5 rows; you can get the bottom n rows using data.tail(<num of rows needed>).
print(data.tail())
# %%
# to get frequency count of a column
# value_counts is a function of Series not of dataFrame so cannot be applied as data.value_counts(dropna=False)
unique = data['Year'].value_counts(dropna=False) # dropna= False will include na values
# you can pass normalize=True to get proportions; the values are between 0 and 1
print(unique)
# %%
# to get statistics use data.describe(); only the columns with numeric data type will be returned
# %%
# for plotting the histogram
dataset.plot('quality', subplots=True) # this will apply to all numeric columns and show result
plt.show()
# %%
# dataframe plot
iris = pd.read_csv('pandas_help/iris.data', header=None,
names=['petal width', 'petal length', 'sepal width', 'sepal length', 'species'])
iris.plot(x='sepal length', y='petal length') # here you can provide y as list ['petal length', 'sepal width'] also you
# can provide s=sizes
plt.show()
# %%
# you can plot histogram of single column as
dataset['quality'].plot(kind='hist') # can pass logx=True or logy = True for logarithmic scale and rot=70 will rotate
# the axis scale by 70 degree. You can also pass cumulative=True in hist for cumulative distribuiton
plt.legend(['Quality'])
plt.show()
# for specifying xlim and ylim use plt.xlim(0,10) or plt.ylim(0,10)
# %%
# boxplot can be used to see outliers
data.boxplot(column='Year', by='Gender') # rot = 90 will rotate the axis scale by 90 degree
plt.show()
iris = pd.read_csv('pandas_help/iris.data', header=None,
names=['petal width', 'petal length', 'sepal width', 'sepal length', 'species'])
iris.plot(y=['petal length', 'sepal length'], kind='box')
plt.show()
iris['petal length'].plot(kind='hist', cumulative=True, density=True)
plt.show()
# %%
# scatter plot can be used to see outliers as well
iris = pd.read_csv('pandas_help/iris.data', header=None,
names=['petal width', 'petal length', 'sepal width', 'sepal length', 'species'])
iris.plot(x='sepal length', y='petal length', kind='scatter')
plt.show()
iris.hist() # this will plot all the numeric columns hist as different subplots
plt.show()
# %%
# melting the data use pd.melt(frame=<dataframe> , id_vars=[<to be kept constant>], value_vars=[<columns to melt>],
# var_name=<>, value_name=<>)
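# illustrative sketch of pd.melt (df_wide and its columns are made up):
df_wide = pd.DataFrame({'name': ['a', 'b'], '2014': [1, 2], '2015': [3, 4]})
df_long = pd.melt(frame=df_wide, id_vars=['name'], value_vars=['2014', '2015'],
                  var_name='year', value_name='count')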
# %%
# pivoting data
# pivot cannot handle duplicate values use pivot_table
# data.pivot(index=<column used as index>, columns=<column to pivot>, values=<column whose values fill the cells>)
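# illustrative sketch of .pivot() (the frame below is made up and has no
# duplicate (index, column) pairs, which .pivot() requires):
df_p = pd.DataFrame({'name': ['a', 'a', 'b', 'b'],
                     'year': ['2014', '2015', '2014', '2015'],
                     'count': [1, 3, 2, 4]})
df_p.pivot(index='name', columns='year', values='count')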
# %%
# pivot_table
# data.pivot_table(index=<columns used as indexing>, columns=<column to pivot>, values=<pivot columns to fill>, aggfunc=)
d = data.pivot_table(index=['Year', 'Country'], columns=['Gender', 'Medal'], values='Athlete', aggfunc='count')
d['Total'] = d.apply(np.sum, axis=1) # axis =1 will apply row wise
print(d)
d.reset_index(inplace=True)
print(d.head())
# %%
# if we have a function that takes multiple values and we want to apply that function to dataframe
# then we have to specify that in apply()
# like if we have a function def diff_money(row, pattern)
# then df.apply(diff_money, axis=1, pattern=pattern) axis=1 will work row wise
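# illustrative sketch of passing extra keyword arguments through .apply();
# diff_money, the column names and the pattern below are hypothetical:
import re
def diff_money(row, pattern):
    # strip the currency symbol from two columns and return the difference
    a = float(pattern.sub('', str(row['price_a'])))
    b = float(pattern.sub('', str(row['price_b'])))
    return a - b
df_money = pd.DataFrame({'price_a': ['$5.00', '$7.50'], 'price_b': ['$1.00', '$2.50']})
df_money['diff'] = df_money.apply(diff_money, axis=1, pattern=re.compile(r'\$'))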
# %%
# concatenate data in pandas using pandas.concat([<dataframe_list>]); by default it keeps
# the original dataframe indexes, so you have to reset the index by passing ignore_index=True; this is row wise
# concatenation. For column wise concatenation pass axis=1 and data are joined based on index value equality
# you can also provide join='inner'; in this case only the rows with an index present in both dataframes will be
# shown, by default join='outer'
# for multi level indexing you can provide keys in .concat
# for example:
s1 = pd.Series(['a', 'b'], index=['a', 'b'])
s2 = pd.Series(['c', 'd'], index=['c', 'b'])
s = pd.concat([s1, s2], keys=['s1', 's2'], axis=0) # try with axis=1 or axis=0
print(type(s))
# .concat() can also work with a dict; in that case the keys of concat will be the keys of the dict, dict = {key: DataFrame/Series}
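# illustrative sketch of concatenating a dict of Series; the dict keys become
# the outer level of the resulting MultiIndex:
s_dict = {'s1': pd.Series(['a', 'b'], index=['a', 'b']),
          's2': pd.Series(['c', 'd'], index=['c', 'b'])}
s_from_dict = pd.concat(s_dict)  # same effect as passing keys=['s1', 's2']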
# %%
# if two dataframes do not have the same order then we can merge the data
df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 5]}, index=['A', 'B', 'C', 'D'])
df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
'value': [5, 6, 7, 8]}, index=['C', 'D', 'A', 'F'])
# here if we want to merge df1 and df2 but notice they don't have same order so we cannot concatenate with axis=1
# so we can merge the df1 and df2 using pd.merge during this type of merge index is ignored
# you can provide suffixes=[] for providing the column suffix if two dataframe have same column name and to distinguish
# in the final table
merged = pd.merge(left=df1, right=df2, left_on='lkey', right_on='rkey')
print(merged)
merged = pd.merge(df1, df2) # here it will be joined on the columns which are common to both and by default
# merge is inner you can provide how='inner'/'outer'/etc. for changing merging behaviour
print(merged)
# There are three types of merge: one-to-one, one-to-many/many-to-one and many-to-many
# if you want to automatically order the merged data frame use pd.merge_ordered()
# arguments that can be passed are fill_method, suffixes
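# illustrative sketch of pd.merge_ordered (the two small frames are made up):
df_l = pd.DataFrame({'date': ['2016-01-01', '2016-01-03'], 'gdp': [100, 102]})
df_r = pd.DataFrame({'date': ['2016-01-02', '2016-01-03'], 'rate': [0.5, 0.6]})
ordered = pd.merge_ordered(df_l, df_r, on='date', fill_method='ffill',
                           suffixes=('_l', '_r'))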
# %%
# you can also join two dataframes using .join() like population.join(unemployment, how='inner')
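# illustrative sketch of .join(), which aligns on the index; population and
# unemployment below are hypothetical frames mirroring the comment above:
population = pd.DataFrame({'population': [100, 200]}, index=['AB', 'CD'])
unemployment = pd.DataFrame({'unemployment': [0.05, 0.07]}, index=['AB', 'EF'])
joined = population.join(unemployment, how='inner')  # only index 'AB' survives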
# %%
# converting one type to another
# example: if we want to convert A column of data to str
# data['A'] = data['A].astype(str)
#
# example: if we want to convert A column of data to category --> category datatype is memory efficient
# https://campus.datacamp.com/courses/analyzing-police-activity-with-pandas/analyzing-the-effect-of-weather-on-policing?ex=4
# follow above link for more useful ways for using category
# data['A'] = data['A'].astype('category')
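# illustrative sketch of the dtype conversions described above (df_t is made up):
df_t = pd.DataFrame({'A': [1, 2, 3], 'B': ['low', 'high', 'low']})
df_t['A'] = df_t['A'].astype(str)         # numeric -> string
df_t['B'] = df_t['B'].astype('category')  # repeated strings -> memory-efficient category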
# %%
# suppose you have a numeric column but it has been set to object because of empty fields
# this can be taken care of by
# df['A'] = pd.to_numeric(df['A'], errors='coerce')
# with errors='coerce', anything that cannot be converted to a numeric data type will become NaN
# if we do not pass errors='coerce' it will raise an error because python will have no idea what to do with the string value
s = | pd.Series(['apple', '1.0', '2', -3]) | pandas.Series |
import argparse
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
import pandas as pd
import cv2
import json
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as F
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# from utils import utils
class FramesDataset(Dataset):
"""Creates a dataset that can be fed into DatasetLoader
Args:
frames (list): A list of cv2-compatible numpy arrays or
a list of PIL Images
"""
def __init__(self, frames):
# Convert to list of tensors
x = [F.to_tensor(img) for img in frames]
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Send the frames to device
x_device = [img.to(device) for img in x]
self.x = x_device #x
def __getitem__(self, idx):
return self.x[idx]
def __len__(self):
return len(self.x)
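# A hedged usage sketch (an assumption, not part of the original API): wrap a
# list of frames in FramesDataset and batch them with DataLoader. collate_fn=list
# keeps each batch as a plain list of 3D image tensors, which is the input
# format torchvision's FasterRCNN models expect.
def _make_frame_loader(frames, batch_size=4):
    dataset = FramesDataset(frames)
    return DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=list)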
class ObjectDetector():
"""ObjectDetector class with staticmethods that can be called from outside by importing as below:
from helmet_detector.detector import ObjectDetector
The staic methods can be accessed using ObjectDetector.<name of static method>()
"""
@staticmethod
def load_custom_model(model_path=None, num_classes=None):
"""Load a model from local file system with custom parameters
Load FasterRCNN model using custom parameters
Args:
model_path (str): Path to model parameters
num_classes (int): Number of classes in the custom model
Returns:
model: Loaded model in evaluation mode for inference
"""
# load an object detection model pre-trained on COCO
model = fasterrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features,num_classes)
# load previously fine-tuned parameters
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
model.to(device)
else:
model.load_state_dict(torch.load(model_path, map_location=device))
# Put the model in evaluation mode
model.eval()
return model
@staticmethod
def run_detection(img, loaded_model):
""" Run inference on single image
Args:
img: image in 'numpy.ndarray' format
loaded_model: trained model
Returns:
Default predictions from trained model
"""
# need to make sure we have 3d tensors of shape [C, H, W]
with torch.no_grad():
prediction = loaded_model(img)
return prediction
@staticmethod
def to_dataframe_highconf(predictions, conf_thres, frame_id):
""" Converts the default predictions into a Pandas DataFrame, only predictions with score greater than conf_thres
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
frame_id : frame id
conf_thres: score greater than this will be kept as detections
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = pd.DataFrame(boxes, columns=['x1','y1','x2','y2'])
df['class_id'] = labels
df['score'] = scores
df['frame_id'] = frame_id
df_list.append(df)
df_detect = pd.concat(df_list, axis=0)
df_detect = df_detect[['frame_id','class_id','score','x1','y1','x2','y2']]
# Keep predictions with high confidence, with score greater than conf_thres
df_detect = df_detect.loc[df_detect['score'] >= conf_thres]
return df_detect
@staticmethod
def to_dataframe(predictions):
""" Converts the default predictions into a Pandas DataFrame
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = | pd.DataFrame(boxes, columns=['x1','y1','x2','y2']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import miditoolkit
import os
def getStats(folder_name,num_notes_dict={},channel=0):
if num_notes_dict=={}:
num_notes_dict=numNotes(folder_name,channel)
df= | pd.DataFrame.from_dict(num_notes_dict, orient='index',columns=["Notes"]) | pandas.DataFrame.from_dict |
__author__ = '<NAME>'
import os
import numpy as np
import pandas as pd
import ctypes
pd.options.mode.chained_assignment = None
from sklearn import cross_validation
from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
import matplotlib
from sklearn.metrics import accuracy_score
matplotlib.style.use('ggplot')
import pylab as pl
from sklearn import preprocessing
from sklearn import ensemble
from sklearn.preprocessing import Imputer
from sklearn import linear_model
from sklearn.feature_selection import RFE
from sklearn.externals import joblib
from sklearn.learning_curve import learning_curve
from sklearn.learning_curve import validation_curve
import pickle
import sys
from sklearn.grid_search import GridSearchCV
from joblib import Parallel, delayed
# ToDo: This file needs cleanup
"""
Zdr: Differential reflectivity : it is a good indicator of drop shape and drop shape is a good estimate of average drop size.
RhoHV: Correlation coefficient: A statistical correlation between the reflected horizontal and vertical power returns.
High values, near one, indicate homogeneous precipitation types,
while lower values indicate regions of mixed precipitation types, such as rain and snow, or hail.
Kdp:Specific differential phase: It is a very good estimator of rain rate and is not affected by attenuation. The range derivative of
differential phase (specific differential phase, Kdp) can be used to localize areas of strong precipitation/attenuation.
"""
"""
ToDo: Feature Engineering
See more:
http://blog.kaggle.com/2015/07/01/how-much-did-it-rain-winners-interview-1st-place-devin-anzelmo/
https://www.kaggle.com/c/how-much-did-it-rain/forums/t/14242/congratulations
http://blog.kaggle.com/2015/06/08/how-much-did-it-rain-winners-interview-2nd-place-no-rain-no-gain/
http://blog.kaggle.com/2015/05/07/profiling-top-kagglers-kazanovacurrently-2-in-the-world/
"""
# region data prep
# ToDo: clean up train data with all missing input but valid label. put zero on label for such data
# if one of the 4 related features (%5..%90) has no value..hard to predict
def load_data(file, load_partial):
# traing data #of rows 13,765,201
#test data #of rows 8,022,756
if "test" in file:
if load_partial:
data = pd.read_csv(file, nrows=22757)
else:
data = pd.read_csv(file)
#test_id.append(np.array(data['Id']))
else: #train data
if load_partial:
data = pd.read_csv(file, nrows=1065201)
else:
data = pd.read_csv(file)
print("loaded data of " + str(data.shape))
return data
def clean_data(data):
data = data.drop(['minutes_past'], axis=1)
# remove data empty rows Ref values all nan
# data = data.set_index('Id')
# ref_sums = data['Ref'].groupby(level='Id').sum()
# null_refs_idx = [i for i in ref_sums.index if np.isnan(ref_sums[i])]
# data.drop(null_refs_idx, axis = 0, inplace = True)
return data
def add_class(data):
    #create a placeholder 'Class' series filled with a sentinel value (100)
c_data = [100 for _ in range(data.shape[0])]
c_series = pd.Series(c_data, name='Class')
#concat class column with data
pd.concat([data, c_series], axis=1, join='inner')
#change the rows of class to 1
data.loc[data['Expected'] <= 2.0, 'Class'] = 0
#data.loc[(data['Expected'] > 1.0) & (data['Expected'] <= 3.0), 'Class'] = 'Light'
data.loc[(data['Expected'] > 2.0) & (data['Expected'] <= 15.0), 'Class'] = 1
#data.loc[(data['Expected'] > 15.0) & (data['Expected'] <= 25.0), 'Class'] = 'Moderate Heavy'
data.loc[data['Expected'] > 15.0 , 'Class'] = 2
#print(data[['Expected','Class']].head(100))
return data
#note: non matching values get converted to NaN for -ve values
def add_features(data):
# Ref
Ref_MAX = data.groupby(['Id'], sort=False)['Ref'].max()
Ref_MAX.name = 'Ref_MAX'
Ref_MIN = data.groupby(['Id'], sort=False)['Ref'].min()
Ref_MIN.name = 'Ref_MIN'
Ref_count = data.groupby(['Id'], sort=False)['Ref'].count()
Ref_count.name = 'Ref_count'
Ref_std = data.groupby(['Id'], sort=False)['Ref'].std()
#Ref_std = Ref_std.pow(2)
Ref_std.name = 'Ref_std'
Ref_med = data.groupby(['Id'], sort=False)['Ref'].median()
Ref_med.name = 'Ref_med'
#Ref_skew = data.groupby(['Id'], sort=False)['Ref'].skew()
#Ref_skew.name = 'Ref_skew'
#Ref_mad = data.groupby(['Id'], sort=False)['Ref'].mad()
#Ref_mad.name = 'Ref_mad'
#Ref_kurt = data.groupby(['Id'], sort=False)['Ref'].kurtosis()
#Ref_kurt.name = 'Ref_kurt'
RefComposite_MAX = data.groupby(['Id'], sort=False)['RefComposite'].max()
RefComposite_MAX.name = 'RefComposite_MAX'
RefComposite_MIN = data.groupby(['Id'], sort=False)['RefComposite'].min()
RefComposite_MIN.name = 'RefComposite_MIN'
RefComposite_count = data.groupby(['Id'], sort=False)['RefComposite'].count()
RefComposite_count.name = 'RefComposite_count'
RefComposite_std = data.groupby(['Id'], sort=False)['RefComposite'].std()
#RefComposite_std = RefComposite_std.pow(2)
RefComposite_std.name = 'RefComposite_std'
RefComposite_med = data.groupby(['Id'], sort=False)['RefComposite'].median()
RefComposite_med.name = 'RefComposite_med'
#RefComposite_skew = data.groupby(['Id'], sort=False)['RefComposite'].skew()
#RefComposite_skew.name = 'RefComposite_skew'
#RefComposite_mad = data.groupby(['Id'], sort=False)['RefComposite'].mad()
#RefComposite_mad.name = 'RefComposite_mad'
#RefComposite_kurt = data.groupby(['Id'], sort=False)['RefComposite'].kurtosis()
#RefComposite_kurt.name = 'RefComposite_kurt'
#Zdr_MAX = data.groupby(['Id'], sort=False)['Zdr'].max()
#Zdr_MAX.name = 'Zdr_MAX'
#Zdr_MIN = data.groupby(['Id'], sort=False)['Zdr'].min()
#Zdr_MIN.name = 'Zdr_MIN'
Zdr_count = data.groupby(['Id'], sort=False)['Zdr'].count()
Zdr_count.name = 'Zdr_count'
Zdr_std = data.groupby(['Id'], sort=False)['Zdr'].std()
#Zdr_std = Zdr_std.pow(2)
Zdr_std.name = 'Zdr_std'
Zdr_med = data.groupby(['Id'], sort=False)['Zdr'].median()
Zdr_med.name = 'Zdr_med'
#Zdr_skew = data.groupby(['Id'], sort=False)['Zdr'].skew()
#Zdr_skew.name = 'Zdr_skew'
#Zdr_mad = data.groupby(['Id'], sort=False)['Zdr'].mad()
#Zdr_mad.name = 'Zdr_mad'
#Zdr_kurt = data.groupby(['Id'], sort=False)['Zdr'].kurtosis()
#Zdr_kurt.name = 'Zdr_kurt'
Kdp_MAX = data.groupby(['Id'], sort=False)['Kdp'].max()
Kdp_MAX.name = 'Kdp_MAX'
Kdp_MIN = data.groupby(['Id'], sort=False)['Kdp'].min()
#Kdp_MIN = Kdp_MIN.pow(2)
Kdp_MIN.name = 'Kdp_MIN'
Kdp_count = data.groupby(['Id'], sort=False)['Kdp'].count()
Kdp_count.name = 'Kdp_count'
#todo: kdp std should be added back
# Kdp_std = data.groupby(['Id'], sort=False)['Kdp'].std()
#Kdp_std = Kdp_std.pow(2)
#Kdp_std.name = 'Kdp_std'
#Kdp_med = data.groupby(['Id'], sort=False)['Kdp'].median()
#Kdp_med.name = 'Kdp_med'
#Kdp_skew = data.groupby(['Id'], sort=False)['Kdp'].skew()
#Kdp_skew.name = 'Kdp_skew'
#Kdp_mad = data.groupby(['Id'], sort=False)['Kdp'].mad()
#Kdp_mad.name = 'Kdp_mad'
#Kdp_kurt = data.groupby(['Id'], sort=False)['Kdp'].kurtosis()
#Kdp_kurt.name = 'Kdp_kurt'
#RhoHV_MAX = data.groupby(['Id'], sort=False)['RhoHV'].max()
#RhoHV_MAX.name = 'RhoHV_MAX'
#RhoHV_MIN = data.groupby(['Id'], sort=False)['RhoHV'].min()
#RhoHV_MIN.name = 'RhoHV_MIN'
RhoHV_count = data.groupby(['Id'], sort=False)['RhoHV'].count()
RhoHV_count.name = 'RhoHV_count'
RhoHV_std = data.groupby(['Id'], sort=False)['RhoHV'].std()
#RhoHV_std = RhoHV_std.pow(2)
RhoHV_std.name = 'RhoHV_std'
RhoHV_med = data.groupby(['Id'], sort=False)['RhoHV'].median()
RhoHV_med.name = 'RhoHV_med'
#RhoHV_skew = data.groupby(['Id'], sort=False)['RhoHV'].skew()
#RhoHV_skew.name = 'RhoHV_skew'
#RhoHV_mad = data.groupby(['Id'], sort=False)['RhoHV'].mad()
#RhoHV_mad.name = 'RhoHV_mad'
#RhoHV_kurt = data.groupby(['Id'], sort=False)['RhoHV'].kurtosis()
#RhoHV_kurt.name = 'RhoHV_kurt'
return Ref_MAX, Ref_MIN, Ref_count, Ref_std, Ref_med, \
RefComposite_MAX, RefComposite_MIN, RefComposite_count, RefComposite_std, RefComposite_med, \
Zdr_count, Zdr_std, Zdr_med, \
Kdp_MAX, Kdp_MIN, Kdp_count, \
RhoHV_count, RhoHV_std, RhoHV_med
#RhoHV_MIN, RhoHV_MAX, Zdr_MIN, Zdr_MAX, Kdp_med, Kdp_std
test_all_ids = []
test_non_empty_ids = []
test_empty_rows_ids = []
def transform_data(data, file):
#Ref = NaN means no rain fall at that instant, safe to remove
#avg the valid Ref over the hour
data_avg = data.groupby(['Id']).mean() #just using mean CV score: 23.4247096352
#print('columns', str(data_avg.columns))
if "test" in file:
global test_all_ids
test_all_ids = data_avg.index
global test_empty_rows_ids
test_empty_rows_ids = data_avg.index[np.isnan(data_avg['Ref'])]
global test_non_empty_ids
test_non_empty_ids = list((set(test_all_ids) - set(test_empty_rows_ids)))
data = data[np.isfinite(data['Ref'])]
#data = data[np.isfinite(data['Ref'])] #CV 23.4481724075
print("creating features...")
Ref_Max, Ref_Min, Ref_count, Ref_std, Ref_med, \
RefComposite_MAX, RefComposite_MIN, RefComposite_count, RefComposite_std, RefComposite_med, \
Zdr_count, Zdr_std, Zdr_med,\
Kdp_MAX, Kdp_MIN, Kdp_count, \
RhoHV_count, RhoHV_std, RhoHV_med = add_features(data)
#RhoHV_MAX, Zdr_MIN, Kdp_med, # Kdp_std,
print("adding features...")
data_avg = pd.concat([data_avg, Ref_Max, Ref_Min, Ref_count, Ref_std, Ref_med,
RefComposite_MAX, RefComposite_MIN, RefComposite_count, RefComposite_std, RefComposite_med,
Zdr_count, Zdr_std, Zdr_med,
Kdp_MAX, Kdp_MIN, Kdp_count,
RhoHV_count, RhoHV_std, RhoHV_med], axis=1, join='inner')
global features
features = data_avg.columns
#id = data['Id'].tolist()
#dist_id = set(id)
#test_valid_id = list(dist_id)
return data_avg
def remove_outlier(train_data):
    #average rainfall per hour is historically less than 5 inches or 127 mm; 70 is considered a storm
#100 gives 23.6765613211
#70 gives 23.426143398 // keep 70 as acceptable rain fall value
#50 gives 23.26343648
data = train_data[train_data['Expected'] <= 50]
#print(data.shape)
#data['Expected'].hist(color='green', cumulative=True, alpha=0.5, bins=50, orientation='horizontal', figsize=(16, 8))
#plt.show()
"""
data45 = data[data['Expected'] > 40]
print("40 < data < 50 ", data45.shape)
data40 = data[data['Expected'] <= 40]
print(data45.shape)
data34 = data40[data40['Expected'] > 30]
print("30 < data <= 40 ", data34.shape)
data30 = data40[data40['Expected'] <= 30]
print(data30.shape)
data23 = data30[data30['Expected'] > 20]
print("20 < data <= 30 ",data23.shape)
data20 = data30[data30['Expected'] <= 20]
print(data20.shape)
data12 = data20[data20['Expected'] > 10]
print("10 < data <= 20 ",data12.shape)
data10 = data20[data20['Expected'] <= 10]
print(" < data <= 10 ",data10.shape)
"""
#set expected values to zero for examples that has most feature values(< 5) = 0
#print(train_data.head(5))
#change expected value where more than four features values are empty (0)
#train_data.ix[(train_data == 0).astype(int).sum(axis=1) > 4, 'Expected'] = 0
#print(train_data.head(5))
return data
def remove_empty_rows(data):
# remove data empty rows Ref values all nan
#print(data.columns)
data = data[np.isfinite(data['Ref'])]
#print(data.shape)
# remove Ref values less than 5 as it's not raining
#data = data[data['Ref'] >= 5] #CV Score: 23.4583024399
#print(data.shape)
#data = data[data['Ref'] <= 50] #CV Score: 23.4583024399
#print(data.shape)
#data sanity check
#Ref [5,10,15,20,25,30,35,40,45,50] vs Rain [0.07,0.15,0.3,0.6,1.3,2.7,5.6,11.53,23.7,48.6]
ref_rain = [(5,0.07),(10,0.15),(15,0.3),(20,0.6),(25,1.3),(30,2.7),(35,5.6),(40,11.53),(45,23.7),(50,48.6)]
rho_hv = [1, 0.99, 0.988, 0.986, 0.982, 0.98, 0.978, 0.976, 0.974, 0.0972]
zdr = [1.1+0.4*i for i in range(0,10)]
kdp = [0.5*i for i in range(0,10)]
for index, row in data.iterrows():
if np.isnan(row['RhoHV']): #fill Nan with appropriate value
mul5 = int(row['Ref'] // 5)
mul5 = 0 if mul5 < 0 else mul5 #-ve values to 0
mul5 = 9 if mul5 > 9 else mul5 #>9 values to 9
#ref_rain[mul5][1]
data.loc[[index], 'RhoHV'] = rho_hv[mul5]
if np.isnan(row['Zdr']): #fill Nan with appropriate value
mul5 = int(row['Ref'] // 5)
mul5 = 0 if mul5 < 0 else mul5 #-ve values to 0
mul5 = 9 if mul5 > 9 else mul5 #>9 values to 9
#ref_rain[mul5][1]
data.loc[[index], 'Zdr'] = zdr[mul5]
if np.isnan(row['Kdp']): #fill Nan with appropriate value
mul5 = int(row['Ref'] // 5)
mul5 = 0 if mul5 < 0 else mul5 #-ve values to 0
mul5 = 9 if mul5 > 9 else mul5 #>9 values to 9
#ref_rain[mul5][1]
data.loc[[index], 'Kdp'] = zdr[mul5]
#mul5 = 1
# suspect_rows = []
# for index, row in data.iterrows():
# mul5 = int(row['Ref'] // 5)
# if mul5 < 1 or mul5 > 9:
# print("invalid index {0} produced by {1}".format(mul5, row['Ref']))
# continue
# if abs(row['Expected'] - ref_rain[mul5-1][1]) > 30 :
# print("index: {0} Ref: {1} Expected: {2}".format(index,row['Ref'],row['Expected']))
# suspect_rows.append(index)
# print("total suspected data "+str(len(suspect_rows)))
# print("suspected data indices"+str(suspect_rows))
#data = data[np.isfinite(data['Ref_5x5_10th'])]
#data = data.set_index('Id')
#ref_sums = data['Ref'] #.groupby(level='Id').sum()
#null_refs_idx = [i for i in ref_sums.index if np.isnan(ref_sums[i])]
#data.drop(null_refs_idx, axis = 0, inplace = True)
return data
def analyze_plot_data(data, type):
# if data series data
if isinstance(data, pd.Series):
data.hist(color='green', alpha=0.5, bins=50, orientation='horizontal', figsize=(16, 8))
#plt.title("distribution of samples in -> " + data.name)
#plt.ylabel("frequency")
#plt.xlabel("value")
pl.suptitle("kaggle_rain2_" + type + "_" + data.name)
#plt.show()
file_to_save = "kaggle_rain2_" + type + "_" + data.name + ".png"
path = os.path.join("./charts/", file_to_save)
plt.savefig(path)
else: #plot all data features/columns
for i in range(0, len(data.columns), 4):
#plt.title("distribution of samples in -> " + data.columns[i])
#plt.ylabel("frequency")
#plt.xlabel("value")
data[data.columns[i: i + 4]].hist(color='green', alpha=0.5, bins=50, figsize=(16, 8))
pl.suptitle("kaggle_rain2_" + type + "_" + data.columns[i])
#plt.show()
file_to_save = "kaggle_rain2_" + type + "_" + data.columns[i] + ".png"
path = os.path.join("./charts/", file_to_save)
plt.savefig(path)
#plt.figure()
#print(data.min())
#basic statistics of the data
#print(data.describe())
#data.hist(color='k', alpha=0.5, bins=25)
#plt.hist(data, bins=25, histtype='bar')
#plt.title(data.columns[0]+"distribution in train sample")
#plt.savefig(feature_name+".png")
#plt.show()
def plot_training_curve(model, X, y):
params = ["min_samples_leaf", "min_samples_split"]
p_range = [2, 4, 8, 10, 12, 14, 16, 18, 20]
# [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
for param in params:
print("plotting validation curve...")
train_scores, valid_scores = validation_curve(model, X, y, param_name=param, param_range=p_range, cv=3,
scoring='mean_absolute_error')
train_scores_mean = np.absolute(np.mean(train_scores, axis=1))
valid_scores_mean = np.absolute(np.mean(valid_scores, axis=1))
plt.title("Validation Curve with GBM")
plt.xlabel(param)
plt.ylabel("MAE")
plt.plot(p_range, train_scores_mean, label="Training Error", color="r", marker='o')
plt.plot(p_range, valid_scores_mean, label="Cross-validation Error", color="g", marker='s')
plt.legend(loc="best")
plt.show()
# t_sizes = [5000, 10000, 15000, 20000, 25000]
# train_sizes, lc_train_scores, lc_valid_scores = learning_curve(model, X, y, train_sizes=t_sizes, cv=3)
# print("plotting learning curve...")
# lc_train_scores_mean = np.absolute(np.mean(lc_train_scores, axis=1))
# lc_valid_scores_mean = np.absolute(np.mean(lc_valid_scores, axis=1))
#
# plt.title("Learning Curve with GBM")
# plt.xlabel("no. of examples")
# plt.ylabel("MAE")
#
# plt.plot(train_sizes, lc_train_scores_mean, label="Training score", color="r", marker='o')
# plt.plot(train_sizes, lc_valid_scores_mean, label="Cross-validation score", color="g", marker='s')
# plt.legend(loc="best")
# plt.show()
scaler = preprocessing.StandardScaler()
def standardize_data(X):
mean = X.mean(axis=0)
X -= mean
std = X.std(axis=0)
X /= std
standardized_data = X
return standardized_data
def normal_distribute_data(X):
# RhoHV is not normally distributed
#taking Log
#transformer = preprocessing.FunctionTransformer(np.log1p)
#transformer.transform(X['RhoHV'])
#print(X['RhoHV'].describe())
X['RhoHV'] = X['RhoHV'].apply(lambda x: np.log10(x))
#comment if removed as feature
#X['RhoHV_5x5_10th'] = np.log10(X['RhoHV_5x5_10th'])
#X['RhoHV_5x5_50th'] = np.log10(X['RhoHV_5x5_50th'])
#X['RhoHV_5x5_90th'] = np.log10(X['RhoHV_5x5_90th'])
rhoFeatures = ['RhoHV'] #,'RhoHV_5x5_10th','RhoHV_5x5_50th','RhoHV_5x5_90th']
for rhoFeature in rhoFeatures:
shiftBy = 0
rhoMean = X[rhoFeature].mean()
if rhoMean < 0:
shiftBy += abs(rhoMean)
X[rhoFeature] += shiftBy
return X
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
def impute_data(non_empty_data):
"""
calculate rain rate from Ref using marshal algo
"""
imp.fit(non_empty_data)
X = imp.transform(non_empty_data) # 23.2592765571 (better)
return X # non_empty_data.fillna(0) #23.2628586644 # X
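# Hedged sketch of the Marshall-Palmer Z-R relation mentioned in the docstring
# above (an alternative baseline estimate, not what impute_data actually does):
# reflectivity in dBZ is converted to a rain rate in mm/hr via Z = 200 * R**1.6.
def marshall_palmer_rate(ref_dbz):
    z = 10.0 ** (ref_dbz / 10.0)       # dBZ -> linear reflectivity Z
    return (z / 200.0) ** (1.0 / 1.6)  # invert Z = 200 * R**1.6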
def prepare_train_data(file_path, load_Partial):
print("preparing training data...")
train_data = load_data(file_path, load_Partial)
train_clean = clean_data(train_data)
train_no_outlier = remove_outlier(train_clean)
print("transforming data...")
transformed_data = transform_data(train_no_outlier, file_path)
non_empty_examples = remove_empty_rows(transformed_data)
#non_empty_examples = add_class(non_empty_examples)
labels = non_empty_examples['Expected']
#categoy = non_empty_examples['Class']
#labels.hist(cumulative=True,bins=50)
#plt.show()
# print('total data: '+str(non_empty_examples.shape[0]))
#
# global train_data_light_rain
# train_data_light_rain = non_empty_examples[non_empty_examples['Class'] == 0]
#
# print('light rain data: '+str(train_data_light_rain.shape[0]))
#
#
# global train_data_moderate_rain
# train_data_moderate_rain = non_empty_examples[non_empty_examples['Class'] == 1]
# print('moderate rain data: '+str(train_data_moderate_rain.shape[0]))
#
#
# global train_data_heavy_rain
# train_data_heavy_rain = non_empty_examples[non_empty_examples['Class'] == 2]
# print('heavy rain data: '+str(train_data_heavy_rain.shape[0]))
X_train = non_empty_examples.drop(['Expected'], axis=1)
#X_train = non_empty_examples.drop(['Expected','Class'], axis=1)
global X_columns
X_columns = X_train.columns
X_train = standardize_data(X_train)
# labels = standardize_data(labels)
#X_train = normal_distribute_data(X_train)
#drop features
#X_train = X_train.drop([#'Ref_5x5_10th','Ref_5x5_50th'
# 'Ref_5x5_90th',
# 'RefComposite_5x5_10th','RefComposite_5x5_50th','RefComposite_5x5_90th',
#'RhoHV_5x5_10th','RhoHV_5x5_50th','RhoHV_5x5_90th'
#,'Zdr_5x5_10th','Zdr_5x5_50th','Zdr_5x5_90th',
# 'Kdp_5x5_10th','Kdp_5x5_50th','Kdp_5x5_90th'
# ], axis=1)
X_train = impute_data(X_train)
#X_train = X_train.drop(['RhoHV'], axis=1)
#print(X_train.head(5000))
return X_train, labels #, categoy
def prepare_test_data(file_path, load_partial):
print("preparing test data...")
# file_path = "./test/test.csv"
#test_file_path = file_test #from kaggle site
#test_file_path = "./test/test_short.csv"
test_data = load_data(file_path, load_partial)
#test_file_path = file_test #from kaggle site
#test_file_path = "./test/test_short.csv"
test_clean = clean_data(test_data)
transformed_data = transform_data(test_clean, file_path)
non_empty_data = remove_empty_rows(transformed_data)
#print('test data', str(non_empty_data.columns))
X_test = standardize_data(non_empty_data)
#X_test = normal_distribute_data(X_test)
#drop features
#X_test = X_test.drop([#'Ref_5x5_10th','Ref_5x5_50th','Ref_5x5_90th',
# 'RefComposite_5x5_10th','RefComposite_5x5_50th','RefComposite_5x5_90th',
# 'RhoHV_5x5_10th','RhoHV_5x5_50th','RhoHV_5x5_90th'
# ,'Zdr_5x5_10th','Zdr_5x5_50th','Zdr_5x5_90th',
# 'Kdp_5x5_10th','Kdp_5x5_50th','Kdp_5x5_90th'
# ], axis=1)
X_test = impute_data(X_test)
#global test_id
#test_id = test_avg['Id']
#test_input = test_avg.drop(['Id'], axis=1)
return X_test, non_empty_data #test_input
# endregion data prep
#region train
def evaluate_models(train_input, labels):
print("evaluating models...")
#regr = linear_model.LinearRegression()
#ridge = Ridge(alpha=1.0)
#laso = linear_model.Lasso(alpha=0.1)
#enet = linear_model.ElasticNet(alpha=0.1)
#clf_dtr = tree.DecisionTreeRegressor()
#ada = ensemble.AdaBoostRegressor(n_estimators=500, learning_rate=.75)
#bag = ensemble.BaggingRegressor(n_estimators=500)
param_grid = {
'min_samples_split': [4, 8, 12, 16,20, 24, 30, 35, 40, 45, 50],
'min_samples_leaf': [8,10,12,14, 18, 20, 22, 25]
}
#increase n_estimators to 400ish
est = ensemble.GradientBoostingRegressor(n_estimators=400)
extree = ensemble.ExtraTreesRegressor(n_estimators=800, max_features=1.0, n_jobs=-1) #-1 sets it to #of cores
    gs_cv = GridSearchCV(extree, param_grid, scoring='mean_absolute_error').fit(train_input, labels)
#best parameters {'min_samples_leaf': 4, 'max_depth': 4, 'min_samples_split': 4, 'max_leaf_nodes': 6, 'max_features': 0.5} from grid search gave score 0.1647636331960101
#best parameters {'max_depth': 4, 'min_samples_split': 4, 'min_samples_leaf': 2, 'max_features': 0.5, 'max_leaf_nodes': 8} from grid search gave score 0.17065558286341795
print("best parameters {0} from grid search gave score {1} ".format(gs_cv.best_params_, gs_cv.best_score_))
params = gs_cv.best_params_
#clf = ensemble.GradientBoostingRegressor(n_estimators = 150,**params)
# cv_scre_last = 100
# for ne in range(20, 400, 10):
#
# #clf_rf = ensemble.RandomForestRegressor(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0, max_features="auto")
#
ne, ss, md = 190, 25, 10 #CV score: 24.340700094525427
clf = extree
# n_estimators=150 CV score: 24.401973843565866 //too slow 170 gives 24.39021427337333
#ne, md, ss = 50, 10, 10
#clf_gbt = ensemble.GradientBoostingRegressor(n_estimators=ne, max_depth=md, min_samples_split=ss, min_samples_leaf=10, learning_rate=0.1, loss='ls')
#
# #print(len(train_input))
# #print(len(labels))
# clf = clf_gbt
# # model evaluator
scores = cross_validation.cross_val_score(clf, train_input, labels, cv=5, scoring='mean_absolute_error')
cv_scre = 21.5 + abs(sum(scores) / len(scores))
print("CV score: {0} - #of_estimators: {1}".format(cv_scre, ne))
#
# if cv_scre >= cv_scre_last:
# print("MAE score didn't improve for #of_estimators: " + str(ne))
# continue
# else:
# cv_scre_last = cv_scre
#model evaluator
""" model evaluation
NEW:
Random Forest evaluation score : 1.13246481639
Extra Tree evaluation score : 1.13414660721
Bagging Regressor evaluation score : 1.15816159605
Gradient Boosting evaluation score : 1.17339099314
#linear regression evaluation score: 1.74012818638
#ridge regression evaluation score: 1.72820341712
#lasso regression evaluation score: 1.58996750817
#elastic_net evaluation score: 1.60092318938
#dtree regression evaluation score: 1.64168047513
#adaBoost regressor evaluation score: 2.81744083141
#Bagging Regressor evaluation score: 1.1617702616
#random forest evaluation score: 1.44005742499
#random forest evaluation score: 1.35075879522 with params
params_rf = {'n_estimators': 500, 'max_depth':None, 'min_samples_split':1, 'random_state':0}
#gradient boosted tree evaluation score: 1.47009354892
#gradient boosted tree evaluation score: 1.42787523525 with params
#{'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1, 'learning_rate': 0.01, 'loss': 'ls'}
"""
return clf
def cv_score(clf, X, y):
print("cross validating model...")
scores = cross_validation.cross_val_score(clf, X, y, cv=3, scoring='mean_absolute_error')
return abs(sum(scores) / len(scores))
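# Note: recent scikit-learn releases name this scorer 'neg_mean_absolute_error' and return
# negated values, which is why the averaged score is wrapped in abs() above.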
def cross_validate_model(model, X_train, y_train):
cvs = 21.5 + cv_score(model, X_train, y_train)
print("MAE on cross validation set: " + str(cvs))
model = None
def pickle_model(model):
# pickle model
with open('./pickled_model/rain2.pickle', 'wb') as f:
pickle.dump(model, f)
f.close()
#joblib.dump(model, './pickled_model/rain2.pkl')
def unpickle_model(file):
    with open(file, 'rb') as f:
model = pickle.load(f)
return model
def split_train_data(train_input, labels, t_size):
# train_test does shuffle and random splits
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
train_input, labels, test_size=t_size, random_state=0)
return X_train, X_test, y_train, y_test
def test_model(model, X_test, y_test):
# print("testing on holdout set...")
pred_y = model.predict(X_test)
print("MAE on holdout test set", 21.5 + mean_absolute_error(y_test, pred_y))
def feature_selection(model, features):
# feature engineering
print("feature selection...")
print(features)
print("feature importance", model.feature_importances_)
min_index = np.argmin(model.feature_importances_)
print("min feature : ({0}){1}, score {2} ".format(min_index, features[min_index], min(model.feature_importances_)))
def train_model(seed, X_train, y_train, isPickled):
#clf = evaluate_models(labels, train_input)
#n_estimators = no. of trees in the forest
#n_jobs = #no. of cores
#clf_rf = ensemble.RandomForestRegressor(n_estimators=100, max_depth=None, n_jobs=4, min_samples_split=1, random_state=0)
#extree = ensemble.ExtraTreesRegressor(n_estimators=190, max_depth=25, min_samples_split=10, n_jobs=-1)
#clf_rf = ensemble.RandomForestRegressor(n_estimators=50, max_depth=None, n_jobs=4, min_samples_split=1,
# max_features="auto")
#ne=400, md=10, ss = 50, sl=10, 10 MAE 22.7590658805 and with learning rate = 0.01 MAE 23.5737808932
#clf_gbt = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=10, min_samples_split=10, min_samples_leaf=10, learning_rate=0.01, loss='ls')
#clf_gbt = ensemble.GradientBoostingRegressor(n_estimators=40, learning_rate=0.1, max_features =0.3, max_depth=4, min_samples_leaf=3, loss='lad')
clf = ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=0.1, max_features=0.3, max_depth=4,
min_samples_leaf=3, loss='huber', alpha=0.55, random_state=seed)
#random forest: 24.53496
#extra tree : 24.471939 (4619908812 with all features)
#gbd : 24.5456
#print("cross validating...")
#cvs = cv_score(clf, X_train, y_train)
#print("CV score: ", 21.5 + cvs)
global model
print("training model...")
#model = clf.fit(train_input, labels)
model = clf.fit(X_train, y_train)
if isPickled:
print("pickling model...")
pickle_model(model)
#no_of_pred = len(pred_y)
# MAE=0
# for i,v in enumerate(pred_y):
# actual = y_test.values[i]
# predicted = v
# error = actual - predicted
# print("rainfall actual: {0} predicted:{1}, error:{2}".
# format(actual, predicted, np.abs(error)))
# MAE = MAE + np.abs(error)
# #print("MAE: ",MAE/no_of_pred)
return model
#endregion train
#test
def write_prediction(test_y, test_non_empty_total_ids):
print("writing to file....")
predictionDf = pd.DataFrame(index=test_non_empty_total_ids, columns=['Expected'], data=test_y)
#predict 0 for empty rows/ids
empty_test_y = np.asanyarray([0 for _ in test_empty_rows_ids])
    emptyRowsDf = pd.DataFrame(index=test_empty_rows_ids, columns=['Expected'], data=empty_test_y)
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
print(df_all.columns.tolist())
df_all.info()
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
avg_hospid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('HospID')['HospID'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('HospID')['HospID'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('HospID')['HospID'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('HospID')['HospID'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('HospID')['HospID'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('HospID')['HospID'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('HospID')['HospID'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('HospID')['HospID'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('HospID')['HospID'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('HospID')['HospID'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/total op sum all years HospID.csv")
# print("details on site id dist:")
# # print("num of all sites: ", len(df_sum_all_Years))
#
# less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
# less_8.to_csv("total op less 10 years siteid.csv")
# print("num of sites with less years: ", len(less_8))
#
# x = np.array(less_8['Distinct_years'])
# print(np.unique(x))
avg_hospid['HospID'] = df_sum_all_Years['HospID']
avg_hospid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_hospid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_hospid['num_of_years'] = df_sum_all_Years['Distinct_years']
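    # A more compact alternative to the ten per-year merges above (sketch, assuming df_all
    # carries 'HospID' and 'surgyear' as used elsewhere in this script):
    # counts = pd.crosstab(df_all['HospID'], df_all['surgyear'])
    # counts['Distinct_years'] = counts.gt(0).sum(axis=1)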
def groupby_siteid_reop():
df2010 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'Reoperation').sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='HospID', how='outer')
df2 =pd.merge(df1, df2012, on='HospID', how='outer')
df3 =pd.merge(df2, df2013, on='HospID', how='outer')
df4 =pd.merge(df3, df2014, on='HospID', how='outer')
df5 =pd.merge(df4, df2015, on='HospID', how='outer')
df6 =pd.merge(df5, df2016, on='HospID', how='outer')
df7 =pd.merge(df6, df2017, on='HospID', how='outer')
df8 =pd.merge(df7, df2018, on='HospID', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='HospID', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['HospID'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['HospID', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / avg_hospid['num_of_years']
df_sum_all_Years.to_csv("/tmp/pycharm_project_723/files/sum all years HospID reop.csv")
# -----------------------first op------------------------------------
df_10 = df_2010.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2010_FirstOperation')
df_11 = df_2011.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2011_FirstOperation')
df_12 = df_2012.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2012_FirstOperation')
df_13 = df_2013.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2013_FirstOperation')
df_14 = df_2014.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2014_FirstOperation')
df_15 = df_2015.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2015_FirstOperation')
df_16 = df_2016.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2016_FirstOperation')
df_17 = df_2017.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2017_FirstOperation')
df_18 = df_2018.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2018_FirstOperation')
df_19 = df_2019.groupby('HospID')['Reoperation'].apply(lambda x: (x == 'First Time').sum()).reset_index(name='2019_FirstOperation')
d1 = pd.merge(df_10, df_11, on='HospID', how='outer')
d2 = pd.merge(d1, df_12, on='HospID', how='outer')
d3 = pd.merge(d2, df_13, on='HospID', how='outer')
d4 = pd.merge(d3, df_14, on='HospID', how='outer')
d5 = pd.merge(d4, df_15, on='HospID', how='outer')
d6 = pd.merge(d5, df_16, on='HospID', how='outer')
    d7 = pd.merge(d6, df_17, on='HospID', how='outer')
#!/usr/bin/env python3
import csv
import os
import time
from datetime import date, datetime, timedelta
from pprint import pprint
import numpy as np
import pandas as pd
import pymongo
import requests
import yfinance as yf
LIMIT = 1000
sp500_file = "constituents.csv"
date_format = "%Y-%m-%d %H:%M:%S"
sp500_list = []
sp500_agg_dict = {}
# EXTRACT
# Read in S&P500 list
if os.path.exists(sp500_file):
with open(sp500_file, "r") as f:
sp500_reader = csv.reader(f)
for i, row in enumerate(sp500_reader):
if i != 0:
sp500_list.append(row)
else:
print(f"ERROR!!! Cannot find {sp500_file}")
# Connect to db
db = pymongo.MongoClient(
f"mongodb+srv://example:{os.getenv('MONGO_ATLAS_PW')}@cluster0.b2q7e.mongodb.net/?retryWrites=true&w=majority"
)
# Get last year
today = datetime.today()
last_year = today - timedelta(days=365)
last_year_list = pd.date_range(last_year, today)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HBox, VBox, Dropdown, Button, Output, Checkbox)
from src.ipycbm.utils import config, data_options
def time_series(path):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import timedelta
import pandas as pd
import json
import glob
confvalues = config.read()
inst = confvalues['set']['institution']
file_info = glob.glob(f"{path}*_information.json")[0]
with open(file_info, 'r') as f:
info_data = json.loads(f.read())
pid = info_data['ogc_fid'][0]
crop_name = info_data['cropname'][0]
area = info_data['area'][0]
figure_dpi = 50
def plot_ts_s2(cloud):
file_ts = glob.glob(f"{path}*_time_series_s2.csv")[0]
df = pd.read_csv(file_ts, index_col=0)
df['date'] = pd.to_datetime(df['date_part'], unit='s')
start_date = df.iloc[0]['date'].date()
end_date = df.iloc[-1]['date'].date()
print(f"From '{start_date}' to '{end_date}'.")
pd.set_option('max_colwidth', 200)
pd.set_option('display.max_columns', 20)
# Plot settings are confirm IJRS graphics instructions
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 14
df.set_index(['date'], inplace=True)
dfB4 = df[df.band == 'B4'].copy()
dfB8 = df[df.band == 'B8'].copy()
datesFmt = mdates.DateFormatter('%-d %b %Y')
if cloud is False:
# Plot NDVI
fig = plt.figure(figsize=(16.0, 10.0))
axb = fig.add_subplot(1, 1, 1)
axb.set_title(
f"Parcel {pid} (crop: {crop_name}, area: {area:.2f} ha)")
axb.set_xlabel("Date")
axb.xaxis.set_major_formatter(datesFmt)
axb.set_ylabel(r'DN')
axb.plot(dfB4.index, dfB4['mean'], linestyle=' ', marker='s',
markersize=10, color='DarkBlue',
fillstyle='none', label='B4')
axb.plot(dfB8.index, dfB8['mean'], linestyle=' ', marker='o',
markersize=10, color='Red',
fillstyle='none', label='B8')
axb.set_xlim(start_date, end_date + timedelta(1))
axb.set_ylim(0, 10000)
axb.legend(frameon=False) # loc=2)
return plt.show()
else:
# Plot Cloud free NDVI.
dfSC = df[df.band == 'SC'].copy()
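            # NDVI = (NIR - Red) / (NIR + Red); for Sentinel-2, B8 is the NIR band and B4 the red band.
            # The SC (scene classification) values 4 and 5 correspond to vegetated / not-vegetated
            # pixels, i.e. the cloud-free observations kept below.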
dfNDVI = (dfB8['mean'] - dfB4['mean']) / \
(dfB8['mean'] + dfB4['mean'])
cloudfree = ((dfSC['mean'] >= 4) & (dfSC['mean'] < 6))
fig = plt.figure(figsize=(16.0, 10.0))
axb = fig.add_subplot(1, 1, 1)
axb.set_title(
f"{inst}\nParcel {pid} (crop: {crop_name}, area: {area:.2f} sqm)")
axb.set_xlabel("Date")
axb.xaxis.set_major_formatter(datesFmt)
axb.set_ylabel(r'NDVI')
axb.plot(dfNDVI.index, dfNDVI, linestyle=' ', marker='s',
markersize=10, color='DarkBlue',
fillstyle='none', label='NDVI')
axb.plot(dfNDVI[cloudfree].index, dfNDVI[cloudfree],
linestyle=' ', marker='P',
markersize=10, color='Red',
fillstyle='none', label='Cloud free NDVI')
axb.set_xlim(start_date, end_date + timedelta(1))
axb.set_ylim(0, 1.0)
axb.legend(frameon=False) # loc=2)
return plt.show()
def plot_ts_bs():
import numpy as np
file_ts = glob.glob(f"{path}*_time_series_bs.csv")[0]
df = pd.read_csv(file_ts, index_col=0)
df['date'] = pd.to_datetime(df['date_part'], unit='s')
start_date = df.iloc[0]['date'].date()
end_date = df.iloc[-1]['date'].date()
print(f"From '{start_date}' to '{end_date}'.")
pd.set_option('max_colwidth', 200)
        pd.set_option('display.max_columns', 20)
import re
import string
import logging
import pandas as pd
import numpy as np
import text
import super_pool
logger = logging.getLogger()
cleanup = text.SimpleCleanup()
emoji = text.Emoji()
def hash_(x):
return hash(x)
def run(df=None):
if df is None:
df = pd.read_csv(
"../input/train.csv", usecols=["description", "title", "deal_probability"]
)
df_test = pd.read_csv("../input/test.csv", usecols=["description", "title"])
    df = pd.concat([df, df_test], axis=0)
#!/usr/bin/env python
# coding: utf-8
# # 5m - Df unification (10 calib. fn-s)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from os.path import join
import pickle
from copy import copy
def get_data_name(file):
if "resnet110" in file:
return "resnet110"
elif "densenet40" in file:
return "densenet40"
else:
return "wide32"
def get_strs(file, is_ece = True, is_kde = False, is_bip = False):
extra = 0
pieces = file.split(".")[0].split("_tag_")
parts1 = pieces[0].split("_")
parts2 = pieces[1].split("_")
n_data = -1
seed = -1
# binning_CV_seed_0_10000_VecS_wide32_s7_tag_confidencegt1_dp_-1.pkl
if is_ece:
cal_method = "_".join(parts1[5:6])
data_name = get_data_name("_".join(parts1[6:]))
tag_name = parts2[0][:-3]
cgt_nr = int(parts2[0][-1])
# KDE_seed_9_10000_VecS_resnet_wide32_s7_tag_1vsRest5_with_c_dp_-1.pkl
elif is_kde:
cal_method = "_".join(parts1[4:5])
data_name = get_data_name("_".join(parts1[5:]))
tag_name = parts2[0]
cgt_nr = -1
# df_seed_1_platt_resnet_3000_cv_0_wide32_s7_tag_confidence_with_cgt3_dp_-1_iso_beta_platt.pkl
# df_seed_6_TempS_3000_cv_0_resnet_wide32_s7_1vsRest5_m_3_921420382311321_with_cgt0_dp_-1_iso_beta_platt
elif is_bip:
cal_method = "_".join(parts1[3:4])
data_name = get_data_name("_".join(parts1[4:]))
n_data = int(parts1[4])
tag_name = parts2[0][:-3]
cgt_nr = int(parts2[0][-1])
seed = int(parts1[2])
# 'df_seed_0_beta_10000_cv_0_densenet40_s7_tag_1vsRest1gt0_dp_-1_iso_beta_platt.pkl'
#df_seed_0_beta_10000_cv_0_resnet110_s7_tag_confidencegt3_dp_-1_iso_beta_platt.pkl
# df_seed_2_Isotonic_resnet110_10000_cv_0_s7_tag_confidence_with_c_dp_-1_PW_NN4_sweep.pkl
else:
cal_method = "_".join(parts1[3:4])
data_name = get_data_name("_".join(parts1[4:]))
tag_name = parts2[0]
cgt_nr = -1
return (cal_method, data_name, tag_name, cgt_nr, n_data, seed)
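# Worked example (illustrative, using the sample filename from the comment above):
# get_strs("binning_CV_seed_0_10000_VecS_wide32_s7_tag_confidencegt1_dp_-1.pkl")
# returns ("VecS", "wide32", "confidence", 1, -1, -1)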
# In[7]:
def get_cgts(df):
all_cdc = []
all_cdcs = []
all_pdc = []
all_pdcs = []
for cdc, cdcs, pdc, pdcs in zip(df.c_hat_distance_c, df.c_hat_distance_c_square, df.p_distance_c, df.p_distance_c_square):
if len(np.array(cdc)) != 4:
print(cdc)
all_cdc.append(np.array(cdc))
all_cdcs.append(np.array(cdcs))
all_pdc.append(np.array(pdc))
all_pdcs.append(np.array(pdcs))
all_cdc = np.array(all_cdc)
all_cdcs = np.array(all_cdcs)
all_pdc = np.array(all_pdc)
all_pdcs = np.array(all_pdcs)
dfs = []
for i in range(4):
if len(all_cdc.shape) == 1:
print()
df_new = df.copy()
df_new.c_hat_distance_c = all_cdc[:,i]
df_new.c_hat_distance_c_square = all_cdcs[:,i]
df_new.p_distance_c = all_pdc[:,i]
df_new.p_distance_c_square = all_pdcs[:,i]
df_new.cgt_nr = i
dfs.append(df_new)
return pd.concat(dfs)
def prep_ECE(files_ECE, columns, path, id_tag):
dfs = []
for file in files_ECE:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, _, _ = get_strs(file)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
dfs.append(df)
df_ECE = pd.concat(dfs)
# Binning column = full method name
df_ECE["binning"] = df_ECE["binning"] + "_" + df_ECE["n_bins"].map(str) + "_" + df_ECE["n_folds"].map(str)
# Remove CV marker from no CV rows
df_ECE["binning"] = df_ECE['binning'].str.replace('(_0$)', "")
# ECE drop useless columns
df_ECE = df_ECE.drop(labels=['n_folds'], axis=1)
# ECE rename columns to match PW
df_ECE = df_ECE.rename({"ECE_abs":"c_hat_distance_p", "ECE_abs_debiased": "c_hat_distance_p_debiased",
"ECE_square":"c_hat_distance_p_square", "ECE_square_debiased":"c_hat_distance_p_square_debiased",
"true_calibration_error_abs":"p_distance_c", "true_calibration_error_square":"p_distance_c_square",
"slope_abs_c_hat_dist_c": "c_hat_distance_c", "slope_square_c_hat_dist_c": "c_hat_distance_c_square"}, axis=1)
df_ECE = df_ECE[columns]
df_ECE.to_pickle("res_ECE_%s.pkl" % id_tag, protocol=4)
def prep_PW(files_PW, columns, path, id_tag):
dfs = []
for file in files_PW:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, _, _ = get_strs(file, is_ece = False)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
dfs.append(df)
df_PW = pd.concat(dfs)
#df_PW.to_pickle("res_PW_%s_test.pkl" % id_tag, protocol=4)
# binnings = df_PW.binning.unique()
# binning_with_trick = []
# for binning in binnings:
# if "trick" in binning:
# binning_with_trick.append(binning)
# for bwt in binning_with_trick:
# df_PW = df_PW.loc[df_PW.binning != bwt] # Drop trick
print(df_PW.binning.unique())
# Create dummy columns for our method
df_PW["c_hat_distance_p_debiased"] = df_PW["c_hat_distance_p"]
df_PW["c_hat_distance_p_square_debiased"] = df_PW["c_hat_distance_p_square"]
# Unify calibration_function name column to match ECE_df
df_PW["calibration_function"] = df_PW['calibration_function'].str.replace('(_[0-9].[0-9]+$)', "")
df_PW = get_cgts(df_PW)
df_PW = df_PW[columns]
df_PW.to_pickle("res_PW_%s.pkl" % id_tag, protocol=4)
def prep_BIP(files_BIP, columns, path, id_tag):
dfs = []
for file in files_BIP:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, n_data, seed = get_strs(file, is_ece = False, is_bip = True)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
df["n_data"] = n_data
df["seed"] = seed
df["p_distance_c"] = -1
df["p_distance_c_squared"] = -1
dfs.append(df)
    df_BIP = pd.concat(dfs)
import pandas as pd
import numpy as np
import pytest
from sklearn.exceptions import ConvergenceWarning
def test_interpolate_data():
from mspypeline.modules.Normalization import interpolate_data
assert interpolate_data(pd.DataFrame()).equals(pd.DataFrame())
data = pd.DataFrame(np.random.random((100, 100)))
data[np.random.random((100, 100)) > 0.5] = np.nan
assert interpolate_data(data).isna().sum().sum() == 0
def test_median_polish():
from mspypeline.modules.Normalization import median_polish
with pytest.warns(RuntimeWarning) as record:
median_polish(pd.DataFrame())
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[0] == "Mean of empty slice"
with pytest.warns(ConvergenceWarning) as record:
median_polish(pd.DataFrame(np.random.random((10, 10))), max_iter=1)
assert len(record) == 1
# TODO testcase with known data and result
def test_base_normalizer():
from mspypeline.modules.Normalization import BaseNormalizer
class NormTest(BaseNormalizer):
def fit(self, data):
super().fit(data)
def transform(self, data):
super().transform(data)
nt = NormTest()
with pytest.raises(NotImplementedError):
        nt.fit(pd.DataFrame())
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import FunctionTransformer, StandardScaler, RobustScaler
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.impute import SimpleImputer
from data_science_toolbox.pandas.profiling.data_types import df_binary_columns_list
from functools import reduce
import warnings
###############################################################################################################
# Custom Transformers from PyData Seattle 2017 Talk
###############################################################################################################
# Reference
# http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html
# https://github.com/jem1031/pandas-pipelines-custom-transformers
class DFFunctionTransformer(TransformerMixin):
# FunctionTransformer but for pandas DataFrames
def __init__(self, *args, **kwargs):
self.ft = FunctionTransformer(*args, **kwargs)
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
Xt = self.ft.transform(X)
Xt = pd.DataFrame(Xt, index=X.index, columns=X.columns)
return Xt
class DFFeatureUnion(BaseEstimator, TransformerMixin):
# FeatureUnion but for pandas DataFrames
def __init__(self, transformer_list):
self.transformer_list = transformer_list
def fit(self, X, y=None):
for (name, t) in self.transformer_list:
t.fit(X, y)
return self
def transform(self, X):
# assumes X is a DataFrame
Xts = [t.transform(X) for _, t in self.transformer_list]
Xunion = reduce(lambda X1, X2: pd.merge(X1, X2, left_index=True, right_index=True), Xts)
return Xunion
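# Example usage of DFFeatureUnion (illustrative sketch; 'age'/'income' are hypothetical columns):
# union = DFFeatureUnion([
#     ('scaled', DFStandardScaler(cols=['age', 'income'])),
#     ('dummies', DummyTransformer()),
# ])
# X_features = union.fit_transform(X_raw)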
class DFImputer(TransformerMixin):
# Imputer but for pandas DataFrames
def __init__(self, strategy='mean', fill_value=None):
self.strategy = strategy
self.imp = None
self.statistics_ = None
self.fill_value = fill_value
        if self.strategy == 'constant' and self.fill_value is None:
            warnings.warn('DFImputer strategy set to "constant" but no fill value provided. '
                          'By default the fill value will be set to 0')
            self.fill_value = 0
def fit(self, X, y=None):
self.imp = SimpleImputer(strategy=self.strategy, fill_value=self.fill_value)
self.imp.fit(X)
self.statistics_ = pd.Series(self.imp.statistics_, index=X.columns)
return self
def transform(self, X):
# assumes X is a DataFrame
Ximp = self.imp.transform(X)
Xfilled = pd.DataFrame(Ximp, index=X.index, columns=X.columns)
return Xfilled
class DFStandardScaler(BaseEstimator, TransformerMixin):
# StandardScaler but for pandas DataFrames
def __init__(self, cols=None):
self.ss = None
self.mean_ = None
self.scale_ = None
self.cols = cols
def fit(self, X, y=None):
if not self.cols:
self.cols = X.select_dtypes(include=np.number).columns.values.tolist()
self.ss = StandardScaler()
self.ss.fit(X[self.cols])
self.mean_ = pd.Series(self.ss.mean_, index=X[self.cols].columns)
self.scale_ = pd.Series(self.ss.scale_, index=X[self.cols].columns)
return self
def transform(self, X):
# assumes X is a DataFrame
# Scale the specified columns
Xss = self.ss.transform(X[self.cols])
Xscaled = pd.DataFrame(Xss, index=X.index, columns=X[self.cols].columns)
# Merge back onto the dataframe
Xscaled = pd.merge(X[[col for col in X.columns if col not in self.cols]],
Xscaled, left_index=True, right_index=True)
return Xscaled
class DFRobustScaler(TransformerMixin):
# RobustScaler but for pandas DataFrames
def __init__(self):
self.rs = None
self.center_ = None
self.scale_ = None
def fit(self, X, y=None):
self.rs = RobustScaler()
self.rs.fit(X)
self.center_ = pd.Series(self.rs.center_, index=X.columns)
self.scale_ = pd.Series(self.rs.scale_, index=X.columns)
return self
def transform(self, X):
# assumes X is a DataFrame
Xrs = self.rs.transform(X)
Xscaled = pd.DataFrame(Xrs, index=X.index, columns=X.columns)
return Xscaled
class ColumnExtractor(BaseEstimator, TransformerMixin):
""" Given a list of columns and optionally a list of columns to include/exclude,
filter a dataframe down to the selected columns.
"""
def __init__(self, cols=None, include=None, exclude=None):
"""
Parameters
----------
cols: list
A list of string column names to subset the data to
exclude: list
A list of string columns to exclude from the dataframe
include: list
A list of string columns to include in the dataframe
"""
self.cols = cols
self.include = include
self.exclude = exclude
def fit(self, X, y=None):
## Default to all columns if none were passed
if not self.cols:
self.cols = X.columns.values.tolist()
# Filter out unwanted columns
if self.exclude:
self.cols = [col for col in self.cols if col not in self.exclude]
# Filter down to subset of desired columns
if self.include:
self.cols = [col for col in self.cols if col in self.include]
return self
def transform(self, X):
# assumes X is a DataFrame
Xcols = X[self.cols]
return Xcols
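# Example usage of ColumnExtractor (illustrative; column names are hypothetical):
# X_subset = ColumnExtractor(cols=['age', 'income', 'id'], exclude=['id']).fit_transform(X_raw)
# # -> DataFrame with only the 'age' and 'income' columns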
class DFDummyTransformer(TransformerMixin):
# Transforms dummy variables from a list of columns
def __init__(self, columns=None):
self.columns = columns
def fit(self, X, y=None):
# Assumes no columns provided, in which case all columns will be transformed
if not self.columns:
self.already_binary_cols = df_binary_columns_list(X)
self.cols_to_transform = list(set(X.columns.values.tolist()).difference(self.already_binary_cols))
# Encode the rest of the columns
self.dummy_encoded_cols = pd.get_dummies(X[self.cols_to_transform])
if self.columns:
self.cols_to_transform = self.columns
# Encode the rest of the columns
self.dummy_encoded_cols = pd.get_dummies(X[self.cols_to_transform])
return self
def transform(self, X):
# assumes X is a DataFrame
# Remove the encoded columns from original
X_transform = X[list(set(X.columns.values.tolist()).difference(self.cols_to_transform))]
# Merge on encoded cols
X_transform = pd.merge(X_transform, self.dummy_encoded_cols, left_index=True, right_index=True)
return X_transform
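# Note: DFDummyTransformer builds the dummy columns during fit() and merges them back by index
# in transform(), so it only behaves as expected when transform() receives the same rows that
# were passed to fit(); unseen data or unseen category levels are not handled.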
def df_binary_columns_list(df):
""" Returns a list of binary columns (unique values are either 0 or 1)"""
binary_cols = [col for col in df if
df[col].dropna().value_counts().index.isin([0,1]).all()]
return binary_cols
class ZeroFillTransformer(TransformerMixin):
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
# assumes X is a DataFrame
Xz = X.fillna(value=0)
return Xz
class Log1pTransformer(TransformerMixin):
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
# assumes X is a DataFrame
Xlog = np.log1p(X)
return Xlog
class DateFormatter(TransformerMixin):
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
# assumes X is a DataFrame
Xdate = X.apply(pd.to_datetime)
return Xdate
class DateDiffer(TransformerMixin):
def fit(self, X, y=None):
# stateless transformer
return self
def transform(self, X):
# assumes X is a DataFrame
beg_cols = X.columns[:-1]
end_cols = X.columns[1:]
        Xbeg = X[beg_cols].to_numpy()
        Xend = X[end_cols].to_numpy()
Xd = (Xend - Xbeg) / np.timedelta64(1, 'D')
diff_cols = ['->'.join(pair) for pair in zip(beg_cols, end_cols)]
Xdiff = pd.DataFrame(Xd, index=X.index, columns=diff_cols)
return Xdiff
class DummyTransformer(TransformerMixin):
def __init__(self):
self.dv = None
def fit(self, X, y=None):
# assumes all columns of X are strings
Xdict = X.to_dict('records')
self.dv = DictVectorizer(sparse=False)
self.dv.fit(Xdict)
return self
def transform(self, X):
# assumes X is a DataFrame
Xdict = X.to_dict('records')
Xt = self.dv.transform(Xdict)
cols = self.dv.get_feature_names()
Xdum = pd.DataFrame(Xt, index=X.index, columns=cols)
# drop column indicating NaNs
nan_cols = [c for c in cols if '=' not in c]
Xdum = Xdum.drop(nan_cols, axis=1)
return Xdum
class MultiEncoder(TransformerMixin):
# Multiple-column MultiLabelBinarizer for pandas DataFrames
def __init__(self, sep=','):
self.sep = sep
self.mlbs = None
def _col_transform(self, x, mlb):
cols = [''.join([x.name, '=', c]) for c in mlb.classes_]
xmlb = mlb.transform(x)
        xdf = pd.DataFrame(xmlb, index=x.index, columns=cols)
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
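# Illustrative sketch, not from the original suite: the NaT exclusion exercised above became
# configurable via the dropna keyword (assumed pandas >= 1.1); df_demo below is likewise an
# assumed toy frame, reusing this module's pd import.
def test_groupby_dropna_keyword_sketch():
    df_demo = pd.DataFrame({"dt": [pd.NaT, pd.Timestamp("2013-01-01")], "v": [1, 2]})
    assert df_demo.groupby("dt").ngroups == 1  # NaT key dropped by default
    assert df_demo.groupby("dt", dropna=False).ngroups == 2  # NaT kept as its own group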
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = | tm.makeTimeDataFrame() | pandas._testing.makeTimeDataFrame |
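# Illustrative aside, not part of the dataset row above: the completed call resolves to
# pandas._testing.makeTimeDataFrame which, in the pandas versions this dataset appears to
# target (an assumption), returns a small random float frame indexed by business days with
# columns "A"-"D".
import pandas._testing as tm_demo
frame = tm_demo.makeTimeDataFrame()
assert list(frame.columns) == ["A", "B", "C", "D"]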
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
[-0.5, 0.2, pd.np.NaN, pd.np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([pd.np.NaN, pd.np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([pd.np.NaN, pd.np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([( | TS('2015-01-02') | pandas.Timestamp |
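# Illustrative aside, not part of the dataset row above: the TS alias used throughout these
# tests is simply pandas.Timestamp, so the truncated tuple would continue with entries such
# as TS('2015-01-02'); the equality below is standard pandas behaviour.
import pandas as pd_demo
assert pd_demo.Timestamp("2015-01-02") == pd_demo.to_datetime("2015-01-02")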
from SentinelTime.data_preprocessing import *
from SentinelTime.mask_stack import *
import rasterio.mask
import matplotlib.pyplot as plt
import pandas as pd
def extract_dates(directory, allowed_orbits):
"""
    Extracts dates from a list of preprocessed S-1 GRD files (names need to follow the standard pyroSAR export scheme!)
    :param directory: string
        Path to folder where the files are stored
    :param allowed_orbits: list
        Relative orbit numbers used to filter the file list
    :return: list
        returns a list of acquisition dates of the S-1 GRD files
"""
file_list = extract_files_to_list(path_to_folder=directory, datatype=".tif", path_bool=False)
new_file_list = []
for orbit in allowed_orbits:
for file in file_list:
if str(orbit) in file[len(file) - 8:len(file)]:
new_file_list.append(file)
date_list = []
for file in file_list:
date_list.append(int(file[2:10]))
return date_list
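# Illustrative aside, added here and not taken from the project: extract_dates simply reads
# file[2:10] as YYYYMMDD, so it presumes file names shaped like the hypothetical example
# below; the "S1" prefix and the remainder of the name are assumptions.
_example_name = "S120200105_VV_Asc_044.tif"  # hypothetical file name
assert int(_example_name[2:10]) == 20200105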
def extract_time_series(results_dir, shapefile, buffer_size, point_path, allowed_orbits, test):
"""
Extracts time series information from patches of pixels using points and a buffer size to specify the size of the
patch
    :param shapefile: string
        Path to the point shapefile, including the shapefile name
    :param results_dir: string
        Path to the results directory, where the layerstacks are stored and the csv files will be written
    :param point_path: string
        Path to the point shapefile directory
    :param buffer_size: int
        Buffer size specifies the side length of the rectangular buffer around each point
    :param allowed_orbits: list
        Relative orbit numbers to include when extracting the acquisition dates
    :param test:
        currently not used inside this function
"""
# Import Patches for each class and all 4 layerstacks (VH/VV/Asc/Desc)
patches, lon_list, lat_list, ids = create_point_buffer(shapefile, buffer_size=buffer_size)
layer_stacks = extract_files_to_list(path_to_folder=results_dir, datatype=".tif", path_bool=True)
# Iterate through all layerstacks:
for file in layer_stacks:
src1 = rio.open(file)
patch_mean = []
# Iterate through all patches of current class
for patch in patches:
pixel_mean = []
out_image, out_transform = rio.mask.mask(src1, [patch], all_touched=1, crop=True, nodata=np.nan)
# Calculate Mean for each patch:
for pixel in out_image:
pixel_mean.append(np.nanmean(pixel))
patch_mean.append(pixel_mean)
        # Append the acquisition dates to each list (they will be stored as float, which doesn't matter for processing):
if "VH" in file and "Asc" in file:
patch_mean.append(extract_dates(results_dir + "VH" + "/" + "Asc" + "/", allowed_orbits))
if "VH" in file and "Desc" in file:
patch_mean.append(extract_dates(results_dir + "VH" + "/" + "Desc" + "/", allowed_orbits))
if "VV" in file and "Asc" in file:
patch_mean.append(extract_dates(results_dir + "VV" + "/" + "Asc" + "/", allowed_orbits))
if "VV" in file and "Desc" in file:
patch_mean.append(extract_dates(results_dir + "VV" + "/" + "Desc" + "/", allowed_orbits))
print(patch_mean)
# Rotate array, so csv file will have correct orientation:
patch_mean = np.rot90(patch_mean)
patch_mean = np.rot90(patch_mean)
patch_mean = np.rot90(patch_mean)
patch_mean = patch_mean.tolist()
src1.close()
# for i, date in enumerate(patch_mean):
# patch_mean[i][0] = int(patch_mean[i][0])
# print(patch_mean[5][0])
# print(patch_mean)
        # Create CSV export directory and create a header string with length equal to the number of patches per class:
csv_result_dir = results_dir + "CSV/"
if not os.path.exists(csv_result_dir):
os.mkdir(csv_result_dir)
if "VH" in file:
pol1 = "VH"
vh_head_string = "VH"
tmp = ","
for i, elem in enumerate(patches):
vh_head_string = vh_head_string + str(i) + tmp + pol1
if "VV" in file:
pol1 = "VV"
vv_head_string = "VV"
tmp = ","
for i, elem in enumerate(patches):
vv_head_string = vv_head_string + str(i) + tmp + pol1
# Export patch means to csv files for each class, polarization and flight direction:
if "VH" in file and "Asc" in file:
# print(patch_mean)
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VH_Asc.csv",
patch_mean, delimiter=",", header="date," + vh_head_string[0:len(vh_head_string) - 3], fmt="%f")
if "VH" in file and "Desc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VH_Desc.csv",
patch_mean, delimiter=",", header="date," + vh_head_string[0:len(vh_head_string) - 3], fmt="%f")
if "VV" in file and "Asc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VV_Asc.csv",
patch_mean, delimiter=",", header="date," + vv_head_string[0:len(vv_head_string) - 3], fmt="%f")
if "VV" in file and "Desc" in file:
np.savetxt(csv_result_dir + shapefile[len(point_path):len(shapefile) - 4] + "_VV_Desc.csv",
patch_mean, delimiter=",", header="date," + vv_head_string[0:len(vv_head_string) - 3], fmt="%f")
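# Illustrative aside, added here: the three successive np.rot90 calls above amount to a single
# quarter turn in the opposite direction; the toy array below only stands in for patch_mean and
# relies on the numpy alias this module already uses via its star imports.
_demo_arr = np.arange(6).reshape(2, 3)
assert np.array_equal(np.rot90(np.rot90(np.rot90(_demo_arr))), np.rot90(_demo_arr, k=-1))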
def import_time_series_csv(path_to_folder, frost_bool):
"""
Imports csv files from results folder
    :param frost_bool: boolean
        If set to True, frost/weather information is merged into each imported dataframe
    :param path_to_folder: string
        Path to folder, where csv files are stored
    :return: tuple
        returns a tuple containing the dataframe names, the dataframes themselves and the weather data
"""
csv_list = extract_files_to_list(path_to_folder, datatype=".csv", path_bool=False)
df_name_list = []
df_list = []
for csv in csv_list:
df = pd.read_csv(path_to_folder + csv)
df = df.rename({"# date": "date"}, axis=1)
# Change datatype of date from float to date object:
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
# if frost_bool:
# df, precip = import_weather_for_fern(radar_df=df)
if frost_bool:
df, weather = import_weather_for_fern(radar_df=df, frost_bool=frost_bool)
if not frost_bool:
weather = import_weather_for_fern(radar_df=df, frost_bool=frost_bool)
df_name_list.append(csv[0:len(csv) - 4])
df_list.append(df)
return df_name_list, df_list, weather
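# Illustrative aside, added here: the '%Y%m%d' format above round-trips the numeric dates that
# extract_time_series writes into the csv files; the literal values below are assumptions used
# only to show the conversion.
_demo_dates = pd.Series([20200105, 20200117])
assert pd.to_datetime(_demo_dates, format='%Y%m%d').iloc[0] == pd.Timestamp('2020-01-05')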
def temporal_statistics(path_to_csv_folder, results_dir, fig_folder, plot_bool, frost_bool):
"""
Function calculates temporal statistics for all classes, polarizations and flight directions
    :param fig_folder: string
        Path to the folder where the figures are saved
    :param frost_bool: boolean
        If set to True, frost/weather data is merged into the time series before plotting
    :param path_to_csv_folder: string
        Path to folder, where csv files are stored
    :param results_dir: string
        Path to the results directory, where the Temp_Statistics.csv file is written
    :param plot_bool: boolean
        If set to True, charts of mean and std.dev. are plotted
    :return: tuple
        Returns the Gaussian-filtered mean time series for the four polarization/orbit combinations and the list
        of dataframes; the temporal statistics themselves are written to Temp_Statistics.csv
"""
import csv
from scipy.ndimage.filters import gaussian_filter1d
df_name_list, df_list, weather = import_time_series_csv(path_to_csv_folder, frost_bool)
statistics_dict = {}
# print(df_name_list)
# Iterate through all dataframes and compute temporal statistics
for i, df in enumerate(df_list):
# print(df)
# Temporal Mean:
df["patches_mean"] = df.mean(axis=1)
# print(df_name_list[i])
statistics_dict[df_name_list[i]] = {"Temporal Mean": round(df["patches_mean"].mean(), 3)}
statistics_dict[df_name_list[i]]["Temporal Median"] = round(df["patches_mean"].median(), 3)
# Temporal Standard Deviation:
df["patches_std"] = df.std(axis=1)
statistics_dict[df_name_list[i]]["Temporal Stdev."] = round(df["patches_std"].mean(), 3)
# Max., Min. and Amplitude:
statistics_dict[df_name_list[i]]["Temporal Max."] = round(df["patches_mean"].max(), 3)
statistics_dict[df_name_list[i]]["Temporal Min."] = round(df["patches_mean"].min(), 3)
statistics_dict[df_name_list[i]]["Temporal Amp."] = round(df["patches_mean"].max()
- df["patches_mean"].min(), 3)
print(statistics_dict)
dataframe_list1 = []
dataframe_list2 = []
dataframe_list3 = []
dataframe_list4 = []
tmp = 0
# Iterate through a quarter of the csv files to account for all four possible options of VH/VV/Asc/Desc
for j in range(0, int(len(df_name_list) / 4)):
# Iterate through Mean and Std.Dev.:
for k, elem in enumerate(["patches_mean"]):
# Plot mean of all patches over time if boolean is TRUE
if plot_bool:
plt.figure(figsize=(16, 9))
plt.rcParams.update({'font.size': 14})
# TODO: make weather data stuff optional!!!!
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
fig, ax1 = plt.subplots()
fig.subplots_adjust(right=0.75)
fig.set_figheight(9)
fig.set_figwidth(15)
ax2 = ax1.twinx()
ax3 = ax1.twinx()
ax3.spines["right"].set_position(("axes", 1.1))
make_patch_spines_invisible(ax3)
ax3.spines["right"].set_visible(True)
# plt.figure(figsize=(16, 9))
# plt.rcParams.update({'font.size': 14})
if k == 0:
# ax1.figure(figsize=(16, 9))
plt.title('Mean of all Patches for class: ' + str(df_name_list[tmp][0:17]))
if k == 1:
# ax1.figure(figsize=(16, 9))
plt.title('Std.Dev. of all Patches for class: ' + str(df_name_list[tmp][0:17]))
ax1.plot('date', elem, data=df_list[tmp], marker='', color='k', linewidth=0.7, label="")
ax1.plot('date', elem, data=df_list[tmp + 1], marker='', color='forestgreen', linewidth=0.7, label="")
# print(df_name_list[tmp + 3])
# print(df_name_list[tmp + 2])
ax1.plot('date', elem, data=df_list[tmp + 2], marker='', color='b', linewidth=0.7, label="")
ax1.plot('date', elem, data=df_list[tmp + 3], marker='', color='firebrick', linewidth=0.7, label="")
# filter time series using gaussian filter:
arr1 = gaussian_filter1d(df_list[tmp]["patches_mean"].to_numpy(), sigma=2)
arr2 = gaussian_filter1d(df_list[tmp + 1]["patches_mean"].to_numpy(), sigma=2)
arr3 = gaussian_filter1d(df_list[tmp + 2]["patches_mean"].to_numpy(), sigma=2)
arr4 = gaussian_filter1d(df_list[tmp + 3]["patches_mean"].to_numpy(), sigma=2)
# append filtered datasets to lists for further use:
dataframe_list1.append(arr1)
dataframe_list2.append(arr2)
dataframe_list3.append(arr3)
dataframe_list4.append(arr4)
# Plot filtered mean of all patches over time if boolean is TRUE
if plot_bool:
#
ax1.plot(df_list[tmp]['date'], arr1, marker='', color='k', linewidth=3,
label=df_name_list[tmp][18:len(df_name_list[tmp])])
ax1.plot(df_list[tmp + 1]['date'], arr2, marker='', color='forestgreen', linewidth=3,
label=df_name_list[tmp + 1][18:len(df_name_list[tmp + 1])])
ax1.plot(df_list[tmp + 2]['date'], arr3, marker='', color='b', linewidth=3,
label=df_name_list[tmp + 2][18:len(df_name_list[tmp + 2])])
ax1.plot(df_list[tmp + 3]['date'], arr4, marker='', color='firebrick', linewidth=3,
label=df_name_list[tmp + 3][18:len(df_name_list[tmp + 3])])
# TODO: make weather data stuff optional!!!!
print(df_name_list[tmp + 3][18:len(df_name_list[tmp + 3])])
# plt.xlabel("Date")
ax1.set_xlabel('Date')
ax1.set_ylabel('Backscatter (dB)')
# plt.ylabel("Backscatter (dB)")
ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 1.005),
ncol=4, fancybox=True, shadow=True)
plt.ylim((-18, -7))
print(weather)
ax2.plot(weather['date'], weather['precip'], color="silver")
# plt.ylabel("Precipitation (mm)")
ax2.set_ylabel('Precipitation (mm)', color="silver")
# plt.ylim((-10, 50))
ax2.set_ylim(-10, 110)
ax3.plot(weather['date'], weather['temp'], color="orange")
# plt.ylabel("Precipitation (mm)")
ax3.set_ylabel('Avg_Temp (°C)', color="orange")
# plt.ylim((-10, 110))
ax3.set_ylim(-10, 110)
plt.savefig(fig_folder + "Mean_for_Class_test" + str(df_name_list[tmp][0:17]) + ".png", dpi=300)
plt.show()
# Increase tmp by 4 to get to the next class
tmp = tmp + 4
# Export temporal statistics to csv file:
with open(results_dir + 'Temp_Statistics.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in statistics_dict.items():
# print(value)
writer.writerow([key, value])
return dataframe_list1, dataframe_list2, dataframe_list3, dataframe_list4, df_list
def ratio_calc(path_to_folder, plot_bool, frost_bool):
"""
This function calculates the VH/VV ratio for all classes and flight directions and allows the user to plot the data
    :param frost_bool: boolean
        If set to True, frost/weather data is merged into the imported time series
:param path_to_folder: string
Path to folder, where csv files are stored
:param plot_bool: boolean
If set to TRUE, the plots are calculated and shown
:return: list
Returns a list of dataframes containing VH/VV ratios for all classes and flight directions
"""
| pd.set_option('display.max_columns', None) | pandas.set_option |
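# Illustrative aside, not part of the dataset row above: the completion is pandas.set_option;
# the snippet below shows the call together with its get/reset counterparts on the same option.
import pandas as pd_demo
pd_demo.set_option('display.max_columns', None)  # show every column when printing
assert pd_demo.get_option('display.max_columns') is None
pd_demo.reset_option('display.max_columns')  # restore the default afterwards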
import pandas as __pd
import datetime as __dt
from dateutil import relativedelta as __rd
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
import requests as __requests
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "production/"
def santraller(tarih=__dt.datetime.now().strftime("%Y-%m-%d")):
"""
    Returns the YEKDEM power plant records registered in the EPİAŞ system for the given date.
    Parameter
    ----------
    tarih : date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Power Plant Information (Id, Name, EIC Code, Short Name)
"""
if __dogrulama.__tarih_dogrulama(tarih):
try:
particular_url = __first_part_url + "renewable-sm-licensed-power-plant-list?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["powerPlantList"])
df.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu",
"shortName": "Kısa Adı"}, inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def kurulu_guc(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns, for the months covered by the given date range, the total installed capacity of the YEKDEM power
    plants registered in the EPİAŞ system, broken down by source.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Installed Capacity Information (Date, Installed Capacity)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m')
son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m')
date_list = []
while ilk <= son and ilk <= __dt.datetime.today():
date_list.append(ilk.strftime("%Y-%m-%d"))
ilk = ilk + __rd.relativedelta(months=+1)
with __Pool(__mp.cpu_count()) as p:
df_list = p.map(__yekdem_kurulu_guc, date_list)
return __pd.concat(df_list, sort=False)
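# Illustrative helper, not part of the original module: it shows how the month loop in
# kurulu_guc advances period starts inclusively with relativedelta; the 2021 date range is an
# assumption used only for the example.
def _demo_month_walk():
    months = []
    cursor = __dt.datetime(2021, 1, 1)
    while cursor <= __dt.datetime(2021, 3, 1):
        months.append(cursor.strftime("%Y-%m-%d"))
        cursor = cursor + __rd.relativedelta(months=+1)
    return months  # -> ['2021-01-01', '2021-02-01', '2021-03-01']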
def lisansli_uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly settlement-based feed-in quantity (UEVM) of the licensed power plants under YEKDEM for
    the given date range, broken down by source.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Hourly YEKDEM Licensed UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-licensed-injection-quantity" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableSMProductionList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "landfillGas": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı",
"Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def lisanssiz_uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly settlement-based feed-in quantity (UEVM) of the unlicensed power plants under YEKDEM for
    the given date range, broken down by source.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Hourly YEKDEM Unlicensed UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-unlicenced-generation-amount" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableUnlicencedGenerationAmountList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "lfg": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Kanal Tipi", "Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the settlement-based feed-in quantity (UEVM) of the power plants under YEKDEM for the given date
    range, broken down by source.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Hourly YEKDEM UEVM (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-production" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableSMProductionList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "landfillGas": "Çöp Gazı",
"geothermal": "Jeotermal", "dammedHydroWithReservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı",
"Biyogaz", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def lisansli_gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), santral_id=""):
"""
    Returns the total real-time generation of the licensed power plants under YEKDEM for the given date range.
    Note: if a "santral_id" value is supplied, the real-time generation of that specific plant is returned.
    If it is omitted, the total real-time generation is returned.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    santral_id : plant id as string or integer (default: "")
    Return Value
    -----------------
    Real-Time Generation ("Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi",
    "Çöp Gazı", "Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam")
"""
if __dogrulama.__baslangic_bitis_tarih_id_dogrulama(baslangic_tarihi, bitis_tarihi, santral_id):
if santral_id == "":
return __gerceklesen(baslangic_tarihi, bitis_tarihi)
else:
return __santral_bazli_gerceklesen(baslangic_tarihi, bitis_tarihi, santral_id)
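# Illustrative usage, based only on the signature above and kept as comments because the call
# hits the live EPİAŞ transparency API; the dates and the id value 123 are assumptions.
# df_total = lisansli_gerceklesen(baslangic_tarihi="2021-01-01", bitis_tarihi="2021-01-07")
# df_plant = lisansli_gerceklesen("2021-01-01", "2021-01-07", santral_id=123)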
def birim_maliyet(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the YEKDEM unit cost for the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Periodic YEKDEM Unit Cost (₺/MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-unit-cost" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableSMUnitCostList"])
df["Dönem"] = df["id"].apply(
lambda x: str(__pd.to_datetime(x["donem"][:10]).month_name(locale='tr_TR.UTF-8')) + "-" + str(
__pd.to_datetime(x["donem"][:10]).year))
df["Versiyon"] = df["id"].apply(
lambda x: str(__pd.to_datetime(x["versiyon"][:10]).month_name(locale='tr_TR.UTF-8')) + "-" + str(
__pd.to_datetime(x["versiyon"][:10]).year))
df.rename(index=str, columns={"unitCost": "Birim Maliyet (TL)"}, inplace=True)
df = df[["Dönem", "Versiyon", "Birim Maliyet (TL)"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def donemsel_maliyet(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the periodic YEKDEM costs for the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Periodic YEKDEM Cost (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewables-support" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewablesSupports"])
df["Dönem"] = df["period"].apply(
lambda x: str(__pd.to_datetime(x[:10]).month_name(locale='tr_TR.UTF-8')) + "-" + str(
__pd.to_datetime(x[:10]).year))
df.rename(index=str,
columns={"unitCost": "Birim Maliyet (TL)", "licenseExemptCost": "Lisanssız Toplam Maliyet (TL)",
"renewablesTotalCost": "Toplam Maliyet (TL)",
"reneablesCost": "Lisanlı Toplam Maliyet (TL)",
"portfolioIncome": "Toplam Gelir (TL)"},
inplace=True)
df = df[
["Dönem", "Birim Maliyet (TL)", "Lisanssız Toplam Maliyet (TL)", "Lisanlı Toplam Maliyet (TL)",
"Toplam Maliyet (TL)", "Toplam Gelir (TL)"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def res_uretim_tahmini():
"""
    Returns the next-day total generation forecast for the wind power plants (RES) monitored across Turkey.
    Note: the underlying data is obtained from ritm.gov.tr.
    Parameters
    ------------
    Return Value
    -----------------
    Wind Generation Forecast (MWh)
"""
r = __requests.get("http://www.ritm.gov.tr/amline/data_file_ritm.txt")
df = __pd.DataFrame(r.text.split("\n")[1:][:-1])
df = __pd.DataFrame(df[0].str.split(",").tolist(), columns=["Tarih", "Q5", "Q25", "Q75", "Q95", "Tahmin", "Üretim"])
df["Saat"] = df["Tarih"].apply(lambda x: x.split(" ")[1])
df["Tarih"] = df["Tarih"].apply(lambda x: __pd.to_datetime(x.split(" ")[0], format="%d.%m.%Y"))
df = df[["Tarih", "Saat", "Q5", "Q25", "Q75", "Q95", "Tahmin", "Üretim"]]
return df
def __gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly real-time generation of the licensed power plants under YEKDEM for the given date range,
    broken down by source.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Hourly YEKDEM Licensed Real-Time Generation (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "renewable-sm-licensed-real-time-generation" + "?startDate=" + baslangic_tarihi + \
"&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableLicencedGenerationAmount"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "lfg": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı",
"Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __santral_bazli_gerceklesen(baslangic_tarihi, bitis_tarihi, santral_id):
"""
    Returns the real-time generation of the specified licensed YEKDEM power plant for the given date range.
    Parameters
    ------------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format
    bitis_tarihi : end date in %YYYY-%MM-%DD format
    santral_id : plant id as string or integer
    Return Value
    -----------------
    Plant-Based Real-Time Generation ("Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi",
    "Nehir Tipi", "Çöp Gazı", "Biyogaz", "Güneş", "Biyokütle", "Diğer", "Toplam")
"""
try:
particular_url = __first_part_url + "renewable-sm-licensed-real-time-generation_with_powerplant" + \
"?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi + "&powerPlantId=" + \
str(santral_id)
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["renewableLicencedGenerationAmount"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"canalType": "Kanal Tipi", "riverType": "Nehir Tipi", "biogas": "Biyogaz",
"biomass": "Biyokütle", "lfg": "Çöp Gazı", "sun": "Güneş",
"geothermal": "Jeotermal", "reservoir": "Rezervuarlı", "wind": "Rüzgar",
"total": "Toplam", "others": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Rüzgar", "Jeotermal", "Rezervuarlı", "Kanal Tipi", "Nehir Tipi", "Çöp Gazı", "Biyogaz",
"Güneş", "Biyokütle", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __yekdem_kurulu_guc(tarih):
"""
    Returns, for the given date, the total installed capacity of the YEKDEM power plants registered in the
    EPİAŞ system, broken down by source.
    Parameter
    ----------
    tarih : date in %YYYY-%MM-%DD format (default: today)
    Return Value
    -----------------
    Installed Capacity Information (Date, Installed Capacity)
"""
try:
particular_url = __first_part_url + "installed-capacity-of-renewable?period=" + tarih
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["installedCapacityOfRenewableList"])
columns = df["capacityType"].values
df = df[["capacity"]].transpose()
df.set_axis(columns, axis=1, inplace=True)
df.reset_index(drop=True, inplace=True)
df.insert(loc=0, column="Tarih", value= | __pd.to_datetime(tarih) | pandas.to_datetime |
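# Illustrative aside, not part of the dataset row above: DataFrame.insert places a column at a
# given position in-place, which is how "Tarih" ends up first in the frame built above; the toy
# frame below is an assumption used only to illustrate the call.
import pandas as pd_demo
_f = pd_demo.DataFrame({"capacity": [1.0, 2.0]})
_f.insert(loc=0, column="Tarih", value=pd_demo.to_datetime("2021-01-01"))
assert list(_f.columns) == ["Tarih", "capacity"]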
import numpy as np
from ..util.math import range_step
from ..util.functions import composer
from pandas import Series
_distribution_samples = {
'float': {
'normal': lambda **kw: composer(
lambda **kw: np.random.normal(kw['mean'], kw['std'], kw['size']),
**kw
),
'uniform': lambda **kw: composer(
lambda **kw: np.random.uniform(kw['min'], kw['max'], kw['size']),
**kw
),
'range': lambda **kw: composer(
lambda **kw: np.arange(kw['start'], kw['end'], range_step(kw['start'], kw['end'], kw['size'])),
lambda f, **kw: f.astype(float)[:kw['size']],
**kw
)
},
'integer': {
'uniform': lambda **kw: composer(
lambda **kw: np.random.randint(kw['min'], kw['max'], kw['size']),
**kw
),
'binomial': lambda **kw: composer(
lambda **kw: np.random.binomial(kw['n'], kw['p'], kw['size']),
**kw
),
'range': lambda **kw: composer(
lambda **kw: np.arange(kw['start'], kw['end'], range_step(kw['start'], kw['end'], kw['size'])),
lambda f, **kw: f.astype(int)[:kw['size']],
**kw
)
}
}
TYPES = { 'float', 'integer' }
DISTRIBUTIONS = {
'normal': { 'mean', 'std' },
'uniform': { 'min', 'max' },
'binomial' : { 'n', 'p' },
'range': { 'start', 'end' }
}
def base_props(dtype: str, size: int, **props):
distr = props.get('distr') or 'uniform'
if distr not in {'uniform', 'range'}:
return props
bound_labels = {
'uniform': {
'low': 'min',
'high': 'max'
},
'range': {
'low': 'start',
'high': 'end'
}
}
    # 'min'/'max' are the uniform-distribution bounds, 'start'/'end' the range bounds
    low = props.get('min') if distr == 'uniform' else props.get('start')
    high = props.get('max') if distr == 'uniform' else props.get('end')
    # explicit None checks so that a legitimate bound of 0 is not treated as "missing"
    if low is None and high is not None:
        low = high - size
    elif high is None and low is not None:
        high = low + size
    else:
        low = low if low is not None else 0
        high = high if high is not None else size
return {
**props, **{
'distr': distr,
bound_labels[distr]['low']: low,
bound_labels[distr]['high']: high
}
}
def get_sample(type_: str, size: int, **props):
size = size or props['size']
props = base_props(type_, size, **props)
distr = props['distr']
nums = _distribution_samples[type_][distr](**{**props, 'size': size})
if distr == 'uniform' and props.get('round'):
        nums = np.around(nums, props['round'])  # np.around returns a new array, so keep the result
return | Series(nums) | pandas.Series |
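# Illustrative usage, based only on the definitions above; the sizes and bounds are assumptions,
# and the calls are left as comments because the exact defaulting of missing bounds depends on
# base_props as written.
# s_int = get_sample('integer', 10, distr='uniform')             # 10 ints drawn from [0, 10)
# s_flt = get_sample('float', 5, distr='range', start=0, end=5)  # 5 evenly spaced floats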
#!python3
import argparse
import pandas as pd
import numpy as np
from scipy.optimize import brentq
from plot_module import *
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--output', default="theoretical_eq", type=str, dest="output")
parser.add_argument('--exon_size', default=300, type=int, dest="n")
parser.add_argument('--population_size', default=10000, type=float, dest="population_size")
parser.add_argument('--alpha', default=-118, type=float, dest="alpha")
parser.add_argument('--gamma', default=1.0, type=float, dest="gamma")
parser.add_argument('--beta', default=1.686, type=float, dest="beta")
parser.add_argument('--nbr_states', default=20, type=int, dest="nbr_states")
args, unknown = parser.parse_known_args()
dict_df = dict()
def delta_g(x, alpha):
return alpha + args.gamma * args.n * x
def sel_coeff(x, alpha):
edg = np.exp(args.beta * delta_g(x, alpha))
return args.gamma * args.beta * edg / (1 + edg)
def scaled_sel_coeff(x, alpha):
return 4 * args.population_size * sel_coeff(x, alpha)
def mut_bias(x):
if x == 0.:
return float("inf")
elif x == 1.0:
return -float("inf")
return np.log((1 - x) / x) + np.log(args.nbr_states - 1)
def self_consistent_eq(x, alpha):
return mut_bias(x) - scaled_sel_coeff(x, alpha)
x_eq = brentq(lambda x: self_consistent_eq(x, args.alpha), 0.0, 1.0, full_output=True)[0]
assert (x_eq <= 0.5)
s = sel_coeff(x_eq, args.alpha)
S = 4 * args.population_size * s
assert ((S - mut_bias(x_eq)) < 1e-5)
x_min, x_max = 0, 0.5
y_min, y_max = 0, S * 2
x_range = np.linspace(x_min, x_max, 200)
label = "$\\alpha={0:.2f}, \\gamma={1:.2f}, n={2}, Ne={3:.2f}$"
plt.figure(figsize=(1920 / my_dpi, 1080 / my_dpi), dpi=my_dpi)
plt.plot(x_range, [mut_bias(i) for i in x_range], linewidth=3, label="$ln[(1-x)/x]$")
line, = plt.plot(x_range, [scaled_sel_coeff(i, args.alpha) for i in x_range], linewidth=3,
label="S: " + label.format(args.alpha, args.gamma, args.n, args.population_size))
plt.plot(x_range, [10 * scaled_sel_coeff(i, args.alpha) for i in x_range],
linestyle="--", color=line.get_color(), linewidth=3,
label="S: " + label.format(args.alpha, args.gamma, args.n, 10 * args.population_size))
dict_df["x"] = [x_eq]
dict_df["ΔG"] = [delta_g(x_eq, args.alpha)]
dict_df["s"] = [s]
dict_df["S"] = [S]
dict_df["dNdS"] = [x_eq * S / (1 - np.exp(-S)) + (1 - x_eq) * -S / (1 - np.exp(S))]
args.gamma *= 0.1
args.alpha = brentq(lambda a: s - sel_coeff(x_eq, a), 10 * args.alpha, 0.1 * args.alpha, full_output=True)[0]
line, = plt.plot(x_range, [scaled_sel_coeff(i, args.alpha) for i in x_range], linewidth=3,
label="S: " + label.format(args.alpha, args.gamma, args.n, args.population_size))
plt.plot(x_range, [10 * scaled_sel_coeff(i, args.alpha) for i in x_range],
linestyle="--", color=line.get_color(), linewidth=3,
label="S: " + label.format(args.alpha, args.gamma, args.n, 10 * args.population_size))
plt.legend(fontsize=legend_size)
plt.xlim((x_min, x_max))
plt.ylim((y_min, y_max))
plt.tight_layout()
plt.savefig("{0}.pdf".format(args.output), format="pdf", dpi=my_dpi)
| pd.DataFrame(dict_df) | pandas.DataFrame |
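# Illustrative aside, not part of the dataset row above: the dNdS entry computed in the script is
# the fixation-rate ratio averaged over the two mutation directions,
#   dN/dS = x * S / (1 - exp(-S)) + (1 - x) * (-S) / (1 - exp(S)),
# which tends to 1 as S -> 0; the quick numerical check below assumes nothing beyond that formula.
import numpy as np_demo
x_chk, S_chk = 0.3, 1e-6
val = x_chk * S_chk / (1 - np_demo.exp(-S_chk)) + (1 - x_chk) * (-S_chk) / (1 - np_demo.exp(S_chk))
assert abs(val - 1.0) < 1e-5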
import numpy as np
import pandas as pd
import pickle as pk
import glob
import os
county_list = os.listdir('data/set_features')
print(county_list)
to_remove = []
for id, file_name_ in enumerate(county_list[0:1]):
print(id)
s = pd.read_parquet('data/set_features/' + file_name_, engine='pyarrow')
# for i in s.index:
# print(s.loc[i].tolist())
nan_list = s.columns[s.isna().any()].tolist()
str_list = []
for c in s.columns:
# if s[c].dtype == object:
# print('damn')
# print(c)
# print(s[c])
if isinstance(s.iloc[0][c], str):
str_list.append(c)
non_numer_list = []
for c in s.columns:
if not | pd.to_numeric(s[c], errors='coerce') | pandas.to_numeric |
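# Illustrative aside, not part of the dataset row above: the truncated condition is the usual
# "is this column numerically convertible" check, since pd.to_numeric with errors='coerce' turns
# non-parsable entries into NaN; the tiny Series below is an assumption.
import pandas as pd_demo
_col = pd_demo.Series(["1", "2", "oops"])
assert pd_demo.to_numeric(_col, errors="coerce").isna().sum() == 1  # "oops" became NaN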
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns; sns.set(style="ticks", color_codes=True)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.feature_selection import RFE
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[ ]:
dataset = pd.read_csv("../input/train.csv", names=['Store','Dept','Date','weeklySales','isHoliday'],sep=',', header=0)
features = pd.read_csv("../input/features.csv",sep=',', header=0,
names=['Store','Date','Temperature','Fuel_Price','MarkDown1','MarkDown2','MarkDown3','MarkDown4',
'MarkDown5','CPI','Unemployment','IsHoliday']).drop(columns=['IsHoliday'])
stores = pd.read_csv("../input/stores.csv", names=['Store','Type','Size'],sep=',', header=0)
dataset = dataset.merge(stores, how='left').merge(features, how='left')
# dataset["nextWeekHoliday"] = dataset["isHoliday"].shift(-1).fillna(False)
# dataset["next2WeekHoliday"] = dataset["isHoliday"].shift(-2).fillna(False)
dataset
# # Data exploration
# In[ ]:
def scatter(dataset, column):
plt.figure()
plt.scatter(dataset[column] , dataset['weeklySales'])
plt.ylabel('weeklySales')
plt.xlabel(column)
# In[ ]:
scatter(dataset, 'Fuel_Price')
scatter(dataset, 'Size')
scatter(dataset, 'CPI')
scatter(dataset, 'Type')
scatter(dataset, 'isHoliday')
scatter(dataset, 'Unemployment')
scatter(dataset, 'Temperature')
scatter(dataset, 'Store')
scatter(dataset, 'Dept')
# In[ ]:
fig = plt.figure(figsize=(18, 14))
corr = dataset.corr()
c = plt.pcolor(corr)
plt.yticks(np.arange(0.5, len(corr.index), 1), corr.index)
plt.xticks(np.arange(0.5, len(corr.columns), 1), corr.columns)
fig.colorbar(c)
# In[ ]:
sns.pairplot(dataset, vars=['weeklySales', 'Fuel_Price', 'Size', 'CPI', 'Dept', 'Temperature', 'Unemployment'])
# In[ ]:
sns.pairplot(dataset.fillna(0), vars=['weeklySales', 'MarkDown1', 'MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'])
# In[ ]:
for name, group in dataset.groupby(["Store", "Dept"]):
plt.title(name)
plt.scatter(range(len(group)), group["weeklySales"])
plt.show()
break
# # Data manipulation
# In[ ]:
dataset = pd.get_dummies(dataset, columns=["Type"])
dataset[['MarkDown1','MarkDown2','MarkDown3','MarkDown4', 'MarkDown5']] = dataset[['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5']].fillna(0)
dataset['Month'] = pd.to_datetime(dataset['Date']).dt.month
dataset = dataset.drop(columns=["Date", "CPI", "Fuel_Price", 'Unemployment', 'MarkDown3'])
dataset
# # Algorithms
# In[ ]:
def knn():
knn = KNeighborsRegressor(n_neighbors=10)
return knn
def extraTreesRegressor():
clf = ExtraTreesRegressor(n_estimators=100,max_features='auto', verbose=1, n_jobs=1)
return clf
def randomForestRegressor():
clf = RandomForestRegressor(n_estimators=100,max_features='log2', verbose=1)
return clf
def svm():
clf = SVR(kernel='rbf', gamma='auto')
return clf
def nn():
clf = MLPRegressor(hidden_layer_sizes=(10,), activation='relu', verbose=3)
return clf
def predict_(m, test_x):
return pd.Series(m.predict(test_x))
def model_():
# return knn()
return extraTreesRegressor()
# return svm()
# return nn()
# return randomForestRegressor()
def train_(train_x, train_y):
m = model_()
m.fit(train_x, train_y)
return m
def train_and_predict(train_x, train_y, test_x):
m = train_(train_x, train_y)
return predict_(m, test_x), m
#
# In[ ]:
def calculate_error(test_y, predicted, weights):
return mean_absolute_error(test_y, predicted, sample_weight=weights)
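# These sample weights implement a weighted MAE: holiday weeks are weighted 5x
# relative to non-holiday weeks (see the `weights` construction in the CV loop below).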
# # K-Fold Cross Validation
# In[ ]:
kf = KFold(n_splits=5)
splited = []
# dataset2 = dataset.copy()
for name, group in dataset.groupby(["Store", "Dept"]):
group = group.reset_index(drop=True)
trains_x = []
trains_y = []
tests_x = []
tests_y = []
if group.shape[0] <= 5:
f = np.array(range(5))
np.random.shuffle(f)
group['fold'] = f[:group.shape[0]]
continue
fold = 0
for train_index, test_index in kf.split(group):
group.loc[test_index, 'fold'] = fold
fold += 1
splited.append(group)
splited = pd.concat(splited).reset_index(drop=True)
# In[ ]:
splited
# In[ ]:
best_model = None
error_cv = 0
best_error = np.iinfo(np.int32).max
for fold in range(5):
dataset_train = splited.loc[splited['fold'] != fold]
dataset_test = splited.loc[splited['fold'] == fold]
train_y = dataset_train['weeklySales']
train_x = dataset_train.drop(columns=['weeklySales', 'fold'])
test_y = dataset_test['weeklySales']
test_x = dataset_test.drop(columns=['weeklySales', 'fold'])
print(dataset_train.shape, dataset_test.shape)
predicted, model = train_and_predict(train_x, train_y, test_x)
weights = test_x['isHoliday'].replace(True, 5).replace(False, 1)
error = calculate_error(test_y, predicted, weights)
error_cv += error
print(fold, error)
if error < best_error:
print('Find best model')
best_error = error
best_model = model
error_cv /= 5
# In[ ]:
error_cv
# In[ ]:
best_error
# # Test part
# In[ ]:
dataset_test = pd.read_csv("../input/test.csv", names=['Store','Dept','Date','isHoliday'], sep=',', header=0)
#!/usr/bin/env python
# coding: utf-8
# In[25]:
# import tabula
import pandas as pd
import requests
from urllib.request import urlopen
from lxml import etree
from collections import OrderedDict
from datetime import datetime
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[26]:
# In[27]:
data_url = "http://www.trabajo.gob.ar/estadisticas/eil/"
response = requests.get(data_url)
html = response.content
htmlparser = etree.HTMLParser()
tree = etree.fromstring(html, htmlparser)
file_urls = tree.xpath("//div[@class='row row-flex']/div[3]/a/@href")
file_url = "http://www.trabajo.gob.ar" + file_urls[0]
file_url
# In[28]:
print("reading file")
df = pd.read_excel(file_url, sheet_name="Total aglos 1.1", skiprows=3, header=[0,1])
print('Chapter 03: Scraping Extraction')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('setup.py')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BASE_DIR = ".."
def figNum():
figNum.counter += 1
return "{0:02d}".format(figNum.counter)
figNum.counter = 0
FIGPREFIX = 'ch03_fig'
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('settings.py')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# suppress warnings
import warnings;
warnings.filterwarnings('ignore');
# common imports
import pandas as pd
import numpy as np
import math
import re
import glob
import os
import sys
import json
import random
import pprint as pp
import textwrap
import sqlite3
import logging
import spacy
import nltk
from tqdm.auto import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# pandas display options
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
pd.options.display.max_columns = 30 # default 20
pd.options.display.max_rows = 60 # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200 # default 50; -1 = all
# otherwise text between $ signs will be interpreted as formula and printed in italic
pd.set_option('display.html.use_mathjax', False)
# np.set_printoptions(edgeitems=3) # default 3
import matplotlib
from matplotlib import pyplot as plt
plot_params = {'figure.figsize': (8, 6),
'axes.labelsize': 'small',
'axes.titlesize': 'small',
'xtick.labelsize': 'small',
'ytick.labelsize':'small',
'figure.dpi': 100}
# adjust matplotlib defaults
matplotlib.rcParams.update(plot_params)
import seaborn as sns
sns.set_style("darkgrid")
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: download and interpret robots.txt')
import urllib.robotparser
rp = urllib.robotparser.RobotFileParser()
rp.set_url("https://www.reuters.com/robots.txt")
rp.read()
rp.can_fetch("*", "https://www.reuters.com/sitemap.xml")
rp.can_fetch("*", "https://www.reuters.com/finance/stocks/option")
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: finding URLs from sitemap.xml')
# might need to install xmltodict
import xmltodict
import requests
sitemap = xmltodict.parse(requests.get('https://www.reuters.com/sitemap_news_index1.xml').text)
# just see some of the URLs
urls = [url["loc"] for url in sitemap["urlset"]["url"]]
print("\n".join(urls[0:3]))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: finding URLs from RSS')
# might need to install feedparser
import feedparser
feed = feedparser.parse('http://web.archive.org/web/20200613003232if_/http://feeds.reuters.com/Reuters/worldNews')
print([(e.title, e.link) for e in feed.entries])
print([e.id for e in feed.entries])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Example: Downloading HTML pages with Python')
# %%time
import time
print('start stopwatch')
tstart = time.time()
s = requests.Session()
for url in urls[0:10]:
# get the part after the last / in URL and use as filename
file = url.split("/")[-1]
r = s.get(url)
with open(file, "w+b") as f:
f.write(r.text.encode('utf-8'))
tend = (time.time() - tstart)
print('end stopwatch\nelapsed time: {} seconds'.format(round(tend, 3)))
with open("urls.txt", "w+b") as f:
f.write("\n".join(urls).encode('utf-8'))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extraction with regular expressions')
url = 'https://www.reuters.com/article/us-health-vaping-marijuana-idUSKBN1WG4KT'
file = url.split("/")[-1] + ".html"
r = requests.get(url)
with open(file, "w+") as f:
f.write(r.text)
import re
with open(file, "r") as f:
html = f.read()
g = re.search(r'<title>(.*)</title>', html, re.MULTILINE|re.DOTALL)
if g:
print(g.groups()[0])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Using an HTML parser for extraction')
WA_PREFIX = "http://web.archive.org/web/20200118131624/"
html = s.get(WA_PREFIX + url).text
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
print(soup.select("h1.ArticleHeader_headline"))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting the title/headline')
print(soup.h1)
print(soup.h1.text)
print(soup.title.text)
print(soup.title.text.strip())
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('')
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting the article text')
print(soup.select_one("div.StandardArticleBody_body").text)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting image captions')
print(soup.select("div.StandardArticleBody_body figure"))
print(soup.select("div.StandardArticleBody_body figure img"))
print(soup.select("div.StandardArticleBody_body figcaption"))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting the URL')
print(soup.find("link", {'rel': 'canonical'})['href'])
print(soup.select_one("link[rel=canonical]")['href'])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting list information (authors)')
print(soup.find("meta", {'name': 'Author'})['content'])
sel = "div.BylineBar_first-container.ArticleHeader_byline-bar div.BylineBar_byline span"
print(soup.select(sel))
print([a.text for a in soup.select(sel)])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting text of links (section)')
print(soup.select_one("div.ArticleHeader_channel a").text)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting reading time')
print(soup.select_one("p.BylineBar_reading-time").text)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: extracting attributes (id)')
print(soup.select_one("div.StandardArticle_inner-container")['id'])
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting Attribution')
print(soup.select_one("p.Attribution_content").text)
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Extracting Timestamp')
ptime = soup.find("meta", { 'property': "og:article:published_time"})['content']
print(ptime)
from dateutil import parser
print(parser.parse(ptime))
print(parser.parse(soup.find("meta", { 'property': "og:article:modified_time"})['content']))
print('\n')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Blueprint: Spidering')
import requests
from bs4 import BeautifulSoup
import os.path
from dateutil import parser
def download_archive_page(page):
filename = "page-%06d.html" % page
if not os.path.isfile(filename):
url = "https://www.reuters.com/news/archive/" + \
"?view=page&page=%d&pageSize=10" % page
r = requests.get(url)
with open(filename, "w+") as f:
f.write(r.text)
def parse_archive_page(page_file):
with open(page_file, "r") as f:
html = f.read()
soup = BeautifulSoup(html, 'html.parser')
hrefs = ["https://www.reuters.com" + a['href']
for a in soup.select("article.story div.story-content a")]
return hrefs
def download_article(url):
# check if article already there
filename = url.split("/")[-1] + ".html"
if not os.path.isfile(filename):
r = requests.get(url)
with open(filename, "w+") as f:
f.write(r.text)
def parse_article(article_file):
def find_obfuscated_class(soup, klass):
try:
return soup.find_all(lambda tag: tag.has_attr("class") and (klass in " ".join(tag["class"])))
except Exception as err:
# print('find_obfuscated_class Exception: {}'.format(err))
return ''
with open(article_file, "r") as f:
html = f.read()
r = {}
soup = BeautifulSoup(html, 'html.parser')
try:
r['url'] = soup.find("link", {'rel': 'canonical'})['href']
r['id'] = r['url'].split("-")[-1]
r['headline'] = soup.h1.text
r['section'] = find_obfuscated_class(soup, "ArticleHeader-channel")[0].text
r['text'] = "\n".join([t.text for t in find_obfuscated_class(soup, "Paragraph-paragraph")])
r['authors'] = find_obfuscated_class(soup, "Attribution-attribution")[0].text
r['time'] = soup.find("meta", {'property': "og:article:published_time"})['content']
return r
except Exception as err:
# print('Exception: {}'.format(err))
return r
# download 10 pages of archive
for p in range(1, 10):
download_archive_page(p)
# parse archive and add to article_urls
import glob
article_urls = []
for page_file in glob.glob("page-*.html"):
article_urls += parse_archive_page(page_file)
# download articles
for url in article_urls:
download_article(url)
# arrange in pandas DataFrame
import pandas as pd
df = pd.DataFrame()
import csv
from datetime import date, timedelta
from os import path
import pandas as pd
from nba_api.stats.endpoints import leaguegamefinder, scoreboardv2
basepath = path.dirname(path.dirname(path.abspath(__file__)))
data_path = path.join(basepath, 'data', 'irl')
def write_data_file_for_date(date_param):
date_api_str = date_param.strftime("%m/%d/%Y") # the only format the NBA API accepts for some reason
print("getting data for {}".format(date_api_str))
gf = leaguegamefinder.LeagueGameFinder(
date_from_nullable=date_api_str,
date_to_nullable=date_api_str,
player_or_team_abbreviation='P', # per-player stats instead of per-team
league_id_nullable='00' # NBA only
)
frame = gf.get_data_frames()[0]
# since my csv files are partitioned by date, season_id and game_date can be dropped
# also, 'MATCHUP' contains the team abbrev, and team names change infrequently enough that it's not worth storing for every game log
# I keep everything else passed back by the API though
frame.drop(['SEASON_ID', 'TEAM_NAME', 'TEAM_ABBREVIATION', 'GAME_DATE'], axis=1, inplace=True)
frame.to_csv(path.join(data_path, 'game_logs', date_param.strftime('%Y-%m-%d.csv')), index=False)
today = date.today()
first_day = date(2019, 10, 21) # hardcoded, I know this was the (day before the) first day of the season
date_in_question = first_day
while path.exists(path.join(data_path, 'game_logs', date_in_question.strftime('%Y-%m-%d.csv'))):
date_in_question = date_in_question + timedelta(1)
# I now have the first date that does not exist; I want to go back and update the last one that *does* in case it's incomplete
if date_in_question is not first_day: # unless we're rebuilding the whole season, naturally
date_in_question = date_in_question - timedelta(1)
while date_in_question <= today:
write_data_file_for_date(date_in_question)
date_in_question = date_in_question + timedelta(1)
# now, let's fetch up-to-date standings and schedule (for today) data
print("getting today's schedule and league standings")
s = scoreboardv2.ScoreboardV2(day_offset=0, game_date=today.strftime("%m/%d/%Y"), league_id="00")
df = s.get_data_frames()
games_f = df[0][['GAME_ID', 'GAME_STATUS_TEXT', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID', 'NATL_TV_BROADCASTER_ABBREVIATION']]
games_f.columns = ['GAME_ID', 'START_TIME', 'HOME_ID', 'AWAY_ID', 'NATL_TV']
games_f.to_csv(path.join(data_path, 'schedule', today.strftime('%Y-%m-%d.csv')), index=False)
teams_f = pd.concat([df[4], df[5]])
import numpy
import pandas
#this is the file that contains our dot product code
import Daphnis.distance_methods.methods
#input parameters
cfmid_csv_address='/home/rictuar/coding_projects/fiehn_work/text_files/_cfmid_4_point_0_spectra_for_experimental_comparison/cfmid_output_csv_nist20_only_adduct_[M+H]+_msrb_relaced.csv'
empirical_csv_address='/home/rictuar/coding_projects/fiehn_work/text_files/nist20_hr_csv.txt'
adduct_of_interest='[M+H]+'
instrument_of_interest='_null_'
#the list of inchikeys the the experimental spectra must be in (nist20 only)
inchikey_nist20_only_address='/home/rictuar/coding_projects/fiehn_work/text_files/_attribute_values_and_counts/set_comparison_nist_20_only_InChIKey.txt'
number_of_metadata_columns=26
distance_method='dot_product'
classyfire_results_address='/home/rictuar/coding_projects/fiehn_work/text_files/_cfb_classyfire_results/classy_fire_results_csv.csv'
output_dataset_address='/home/rictuar/coding_projects/fiehn_work/text_files/_orthogonal_analysis_similarity_only/overall_similarity_result_dot_product_[M+H]+.csv'
cfmid_energy_list=['energy0','energy1','energy2']
#build the dict that will hold our new panda
#read in the experimental panda, 1 row to get columns
experimental_panda_one_row=pandas.read_csv(empirical_csv_address,sep='@@@',usecols=range(0,number_of_metadata_columns),nrows=1)
#declare dictionary using columns in experimental panda
output_dictionary={key: [] for key in experimental_panda_one_row.columns}
#add the classyfire, cfmid energy, and distance output keys
output_dictionary['energy#']=[]
output_dictionary['Superclass']=[]
output_dictionary[distance_method]=[]
#receives a path to a file with one entry per row, returns a set of the entries
def read_single_list_to_set(file_address):
temp_file=open(file_address,'r')
line_set=set()
for line in temp_file:
line_set.add(line.rstrip())
return line_set
inchikey_set_nist_20=read_single_list_to_set(inchikey_nist20_only_address)
#read in the cfmid panda
cfmid_panda=pandas.read_csv(cfmid_csv_address,sep='¬',header=0)
#set of things cfmid fragmented
cfmid_fragmented_set=set(cfmid_panda['InChIKey'])
#read in the classyfire panda
classyfire_panda = pandas.read_csv(classyfire_results_address, sep='\t', header=0, usecols=['InChIKey', 'Superclass'])
# coding: utf-8
# # Iris classification using TensorFlow
# # I - Introduction
#
# ---
# #### Objective
# <div style="text-align:justify;">The goal is to follow a Machine Learning project from concept to integration. We start from a simple dataset that already exists online, then design a multiclass classifier with TensorFlow and deploy that model in a mobile application.</div>
#
# #### The dataset
# <div style="text-align:justify;">We will use the Iris classification dataset from the [Kaggle site](https://www.kaggle.com/uciml/iris). This dataset contains 3 labels: Iris-setosa, Iris-versicolor
# and Iris-virginica. These labels correspond to the Iris species we want to distinguish. The dataset contains the width and length of the petals and sepals of 150 plants.</div>
# # II - Building the model
#
# ---
# ## 1. Exploring the dataset
# In[1]:
import pandas as pd # Data Structure
import seaborn as sns # Data Visualization
# We start by importing the dataset with **pandas**.
# In[2]:
datas = pd.read_csv("datas/Iris.csv")
# In[3]:
display(datas.head())
print("Shape: {}".format(datas.shape))
# We use **seaborn** to explore the data graphically.
# In[4]:
g=sns.pairplot(datas, hue="Species", size=2.5)
# ## 2. Data Preprocessing
# ### 2.1 Drop Id
# The Id column is of no use, so we drop it right away.
# In[5]:
datas.drop("Id", axis=1, inplace=True)
# ### 2.2 Separating labels/features
# In[6]:
# Get the names of all the columns
cols=datas.columns
# Separate them into features and label
features = cols[0:4]
labels = cols[4]
print("Liste des features:")
for k in features:
print("- {}".format(k))
print("\nLabel: {}".format(labels))
# ### 2.3 Shuffling the data
# In[7]:
import numpy as np # List manipulation
# **numpy** is used here to shuffle the dataset.
# In[8]:
indices = datas.index.tolist()
indices = np.array(indices)
np.random.shuffle(indices)
X = datas.reindex(indices)[features]
y = datas.reindex(indices)[labels]
# ### 2.4 Categorical to numerical
# We convert the label values, which are categories, into numerical values so that they can be interpreted by our algorithm.
# In[9]:
y.head()
# In[10]:
from pandas import get_dummies
# In[11]:
y=get_dummies(y)
# In[12]:
display(y.head())
# ### 2.5 Train/Test split
#
# <div style="text-align:justify;"><br>Pour pouvoir évaluer la qualité de notre algorithme il faut séparer les données en deux. La base de données d'apprentissage est utilisée pour apprendre à l'algorithme comment classifier les données. Une fois que cela est fait, on est capable de prédire la classe avec une certaine précision. Pour vérifier si l'algorithme est capable de bien généraliser à des données qu'il n'a pas appris (éviter l'**overfitting**), on calcul la précision de l'algorithme pour prédire sur la base de données de test.</div>
#
# - Train: 80%
# - Test : 20%
# In[13]:
from sklearn.model_selection import train_test_split
# In[14]:
y = get_dummies(y)
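# In[15]:
# A minimal sketch of the 80/20 split described above (random_state is an arbitrary choice):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)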
r"""Exp 4:
- Fix:
- n=53, f=?
- Number of iterations = 600
- Not *Long tail* (alpha=1)
- Always NonIID
- Number of runs = 3
- LR = 0.01
- Attack: IPM epsilon=0.1
- Aggregator: CP
- Varies:
- momentum=0, 0.9
- Bucketing: ?
Experiment:
- Fix f=5 varying s:
- s=0,2,5
- m=0,0.9
- Fix s=2 varying f:
- f=1,6,12
- m=0.0.9
"""
from utils import get_args
from utils import main
from utils import EXP_DIR
args = get_args()
assert args.noniid
assert not args.LT
assert args.attack == "IPM"
LOG_DIR = EXP_DIR + "exp4/"
if args.identifier:
LOG_DIR += f"{args.identifier}/"
elif args.debug:
LOG_DIR += "debug/"
else:
LOG_DIR += f"n{args.n}_{args.agg}_{args.attack}_{args.noniid}/"
INP_DIR = LOG_DIR
OUT_DIR = LOG_DIR + "output/"
LOG_DIR += f"f{args.f}_{args.momentum}_s{args.bucketing}_seed{args.seed}"
if args.debug:
MAX_BATCHES_PER_EPOCH = 30
EPOCHS = 3
else:
MAX_BATCHES_PER_EPOCH = 30
EPOCHS = 20
if not args.plot:
main(args, LOG_DIR, EPOCHS, MAX_BATCHES_PER_EPOCH)
else:
# Temporarily put the import functions here to avoid
# random error stops the running processes.
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from codes.parser import extract_validation_entries
# 5.5in is the text width of iclr2022 and 11 is the font size
font = {"size": 11}
plt.rc("font", **font)
def exp_grid1():
for seed in [0, 1, 2]:
for bucketing in [0, 2, 5]:
for momentum in [0.0, 0.9]:
yield momentum, bucketing, seed
results = []
for momentum, bucketing, seed in exp_grid1():
grid_identifier = f"f5_{momentum}_s{bucketing}_seed{seed}"
path = INP_DIR + grid_identifier + "/stats"
try:
values = extract_validation_entries(path)
for v in values:
results.append(
{
"Iterations": v["E"] * MAX_BATCHES_PER_EPOCH,
"Accuracy (%)": v["top1"],
r"$\beta$": momentum,
"seed": seed,
"s": str(bucketing),
}
)
except Exception as e:
pass
results = pd.DataFrame(results)
print(results)
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
results.to_csv(OUT_DIR + "exp4_fix_f.csv", index=None)
plt.figure(figsize=(4, 2))
# sns.set(font_scale=1.25)
g = sns.lineplot(
data=results,
x="Iterations",
y="Accuracy (%)",
style=r"$\beta$",
hue="s",
# height=2.5,
# aspect=1.3,
# legend=False,
# ci=None,
palette=sns.color_palette("Set1", 3),
)
g.set(xlim=(0, 600), ylim=(50, 100))
# Put the legend out of the figure
g.legend(loc="center left", bbox_to_anchor=(1, 0.5))
g.get_figure().savefig(OUT_DIR + "exp4_fix_f.pdf", bbox_inches="tight", dpi=720)
plt.figure(0)
def exp_grid2():
for seed in [0, 1, 2]:
for f in [1, 6, 12]:
for momentum in [0.0, 0.9]:
yield momentum, f, seed
results = []
for momentum, f, seed in exp_grid2():
grid_identifier = f"f{f}_{momentum}_s2_seed{seed}"
path = INP_DIR + grid_identifier + "/stats"
try:
values = extract_validation_entries(path)
for v in values:
results.append(
{
"Iterations": v["E"] * MAX_BATCHES_PER_EPOCH,
"Accuracy (%)": v["top1"],
r"$\beta$": momentum,
"seed": seed,
"q": str(f),
}
)
except Exception as e:
pass
    results = pd.DataFrame(results)
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kendalltau
import pandas as pd
import seaborn as sns
import argparse
import sys, os
import fnmatch
parser = argparse.ArgumentParser()
parser.add_argument('-es', help='<Required> give csv with es generations', required=True)
parser.add_argument('-de', help='<Required> give csv with de generations', required=True)
args = parser.parse_args()
es_data = np.genfromtxt(args.es, delimiter=";")
es_df = pd.DataFrame(data={'Generace': es_data[:, 0], 'Fitness': es_data[:, 1]})
es_df['EA'] = "ES"
de_data = np.genfromtxt(args.de,delimiter=";")
de_df = pd.DataFrame(data={'Generace': de_data[:,0], 'Fitness': de_data[:,1]})
de_df['EA'] = "DE"
df = pd.concat([de_df, es_df])
import random
import timeit
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from algorithms.sort import (quick_sort, merge_sort, pigeonhole_sort, counting_sort, radix_sort, cocktail_shaker_sort,
shell_sort, max_heap_sort, min_heap_sort, bucket_sort, cycle_sort, comb_sort)
def generate_random_list(size):
filled_list = []
for _ in range(0, size):
filled_list.append(random.randint(0, size))
return filled_list
def benchmark_run(func, list):
start_time = timeit.default_timer()
func(list)
return timeit.default_timer() - start_time
def plot_maker(list_of_sample_size, algorithm_to_benchmark, name):
num_runs = 10
duration = 0
benchmark_row = []
for algorithm in algorithm_to_benchmark:
print(algorithm.__name__)
for n in list_of_sample_size:
for _ in range(0, num_runs):
duration += benchmark_run(algorithm, generate_random_list(n))
benchmark_row.append([algorithm.__name__, n, (duration / num_runs)])
duration = 0
    df = pd.DataFrame(data=benchmark_row, columns=["Name", "Sample_size", "Duration"])
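# Hypothetical usage (sample sizes and algorithm selection are placeholders):
# plot_maker([1000, 5000, 10000], [quick_sort, merge_sort, shell_sort], "comparison")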
"""
accounting.py
Accounting and Financial functions.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : <EMAIL>
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt
credits :
"""
import datetime
import numpy as np
import pandas as pd
from pf.constants import DAYS_IN_YEAR
from pf.util import get_age
################################################################################################################################
# Financial Statements
################################################################################################################################
def calc_balance(accounts=None, category_dict=None):
"""
Calculate daily balances of grouped assets/liabilities based on `category_dict`s from `accounts`, returns a DataFrame.
Balance sheet is split into these sections:
Assets
Current
Cash
...
Long Term
Investments
Property
...
Liabilities
Current
Credit Card
...
Long Term
Loans
...
categories = {
'Assets' : {
'Current': {
# User category keys and account DataFrame columns list for values
'Cash & Cash Equivalents': [
('Cash', 'BofA Checking'),
('Cash', 'BofA Savings'),
...
],
'User Category': [...]
...
},
'Long Term': {...}
},
'Liabilities' : {
'Current': {...},
'Long Term': {...}
}
}
"""
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
balance_dict = {
(k0, k1, k2): accounts[v2].sum(axis=1) if v2 else pd.Series(0, index=accounts.index)
for k0, v0 in category_dict.iteritems()
for k1, v1 in v0.iteritems()
for k2, v2 in v1.iteritems()
}
# Convert to DataFrame
balance = pd.DataFrame(balance_dict)
return balance.fillna(0.0)
def balance_sheet(balance=None, period=datetime.datetime.now().year):
"""
Calculate and return a balance sheet.
Balance will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
    All levels may be user defined by the category dictionary. The value of the last level must contain valid pandas DataFrame
column selectors, e.g. `Account Type` for single index column / level 0 access or `('Cash', 'Account Name')` for
multilevel indexing.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
balance = calc_balance(accounts, category_dict=categories)
balancesheet = balance_sheet(balance, period=2015)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
balance_sheets = []
for p in period:
# Force period to string
p = str(p)
# Sum over Period and convert to Statement DataFrame
p_balance = pd.DataFrame(balance[p].iloc[-1])
p_balance.columns = ['$']
p_balance.index.names = ['Category', 'Type', 'Item']
# Calculate Net
net = p_balance[['$']].sum(level=[0, 1]).sum(level=1)
net.index = pd.MultiIndex.from_tuples([('Net', x0, 'Total') for x0 in net.index])
net.index.names = ['Category', 'Type', 'Item']
# Add Net
balance_df = pd.concat([p_balance, net])
# Calculate percentages of level 0
balance_df['%'] = 100.0 * balance_df.div(balance_df.sum(level=0), level=0)
# Calculate heirarchical totals
l1_totals = balance_df.sum(level=[0, 1])
l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
l1_totals.index.names = ['Category', 'Type', 'Item']
l0_totals = balance_df.sum(level=[0])
l0_totals.index = pd.MultiIndex.from_tuples([(x0, 'Total', ' ') for x0 in l0_totals.index])
l0_totals.index.names = ['Category', 'Type', 'Item']
# Add totals to dataframe
balance_df = balance_df.combine_first(l1_totals)
balance_df = balance_df.combine_first(l0_totals)
# Update columns with period
balance_df.columns = pd.MultiIndex.from_product([[p], balance_df.columns])
# Add to main list
balance_sheets.append(balance_df)
# Concatenate all the periods together
balance_sheets_df = pd.concat(balance_sheets, 1)
return balance_sheets_df
def calc_income(paychecks=None, transactions=None, category_dict=None, tax_type=None):
"""
Calculate daily income of grouped revenue/expenses/taxes based on `category_dict`s from `paychecks` and `transactions`,
returns a DataFrame.
Income Statement is split into these sections:
Revenue
Operating
Technical Services
...
Non-Operating
Interest Income
Dividend & Capital Gains
...
Expenses
Operating
Medical
...
Non-Operating
...
Taxes
Operating
Federal
State
...
    All levels may be user defined by the category dictionary. However the last level must contain a dictionary
    with at least a `categories` key and set of categories for the value along with optional parameters.
```
'Revenue': {
'Operating': {
# Paychecks
'Technical Services': {
'source': 'paycheck', # Optional string to select data source, defaults to 'transactions'
'categories': {'Paycheck', ...}, # Required set of categories
'labels': set(), # Optional set of labels, defaults to set() if not passed in
'logic': '', # Optional 'not' string to set inverse of 'labels', defaults to ''
'tax_type' '' # Optional string for tax ('realized' or 'unrealized'), defaults to 'realized'
},
'User Category': {...}
},
'Non-Operating': {
'User Category': {
'categories': {...}
}
}
},
'Expenses': {
'Operating': {...},
'Non-Operating': {..}
},
'Taxes': {
'Operating': {...},
'Non-Operating': {..}
}
```
"""
# Clean category
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if not v2.has_key('source'):
category_dict[k0][k1][k2]['source'] = 'transactions'
if not v2.has_key('labels'):
category_dict[k0][k1][k2]['labels'] = set()
if not v2.has_key('logic'):
category_dict[k0][k1][k2]['logic'] = ''
if not v2.has_key('agg'):
category_dict[k0][k1][k2]['agg'] = np.ones(len(category_dict[k0][k1][k2]['categories']))
if not v2.has_key('tax_type'):
category_dict[k0][k1][k2]['tax_type'] = 'realized'
# Aggregate accounts based on category definition, via 3 level dictionary comprehension
income_dict = {}
for k0, v0 in category_dict.iteritems():
for k1, v1 in v0.iteritems():
for k2, v2 in v1.iteritems():
if v2['source'] == 'transactions':
income_dict[(k0, k1, k2)] = transactions[
(
# If it is in the category
transactions['Category'].isin(v2['categories'])
& transactions['Account Name'].isin(tax_type[v2['tax_type']])
) & (
# And if is has the correct label
(transactions['Labels'].apply(
lambda x: x.isdisjoint(v2['labels']) if v2['logic'] else not x.isdisjoint(v2['labels'])
)) |
# Or it does not have any labels
(transactions['Labels'].apply(lambda x: v2['labels'] == set()))
)
]['Amount']
else:
income_dict[(k0, k1, k2)] = (v2['agg'] * paychecks[list(v2['categories'])]).sum(axis=1)
# Convert to DataFrame
cats = income_dict.keys()
cats.sort()
income = pd.DataFrame(
data=[],
columns=pd.MultiIndex.from_tuples(cats),
index=pd.date_range(transactions.index[-1], transactions.index[0])
)
for cat in income_dict:
cat_df = pd.DataFrame(income_dict[cat].values, index=income_dict[cat].index, columns=pd.MultiIndex.from_tuples([cat]))
income[cat] = cat_df.groupby(lambda x: x.date()).sum()
return income.fillna(0.0)
def income_statement(income=None, period=datetime.datetime.now().year, nettax=None):
"""
Calculate and return an Income Statement.
Income will be based on the last entry of account data (e.g. December 31st) for the given `period` time period,
which defaults to the current year.
If a sequence of periods is passed, each period's data will be calculated and concatenated as MultiIndex columns.
Example:
```
income = calc_income(paychecks=paychecks, transactions=transactions, category_dict=categories)
incomestatement = income_statement(income, period=2016)
```
"""
# Force to list, so code below is the same for all cases
if not isinstance(period, list):
period = [period]
income_statements = []
for p in period:
# Force period to string and set default nettax
p = str(p)
nettax = nettax if nettax else {'Taxes'}
# Convert to DataFrame
p_income = pd.DataFrame(income[p].sum(), columns=['$'])
p_income.index.names = ['Category', 'Type', 'Item']
# Calculate percentages of level 0
p_income['%'] = 100.0 * p_income.div(p_income.sum(level=0), level=0)
# Calculate heirarchical totals
l1_totals = p_income.sum(level=[0, 1])
        l1_totals.index = pd.MultiIndex.from_tuples([(x0, x1, 'Total') for x0, x1 in l1_totals.index])
# -*- coding: utf-8 -*-
# @Author: jerry
# @Date: 2017-09-09 21:03:21
# @Last Modified by: jerry
# @Last Modified time: 2017-09-23 17:09:41
import pandas as pd
from log_lib import log
def get_csv(filename, path=None):
    df = pd.read_csv(filename)
    return df
# Obtaining and processing CVE json **files**
# The code is to download nvdcve zip files from NIST since 2002 to the current year,
# unzip and append all the JSON files together,
# and extracts all the entries from json files of the projects.
import datetime
import json
import os
import re
from io import BytesIO
import pandas as pd
import requests
from pathlib import Path
from zipfile import ZipFile
from pandas import json_normalize
from extract_cwe_record import add_cwe_class, extract_cwe
import configuration as cf
import database as db
# ---------------------------------------------------------------------------------------------------------------------
# Download the nvdcve zip files from NIST
urlhead = 'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-'
urltail = '.json.zip'
initYear = 2002
currentYear = datetime.datetime.now().year
# Consider only current year CVE records when sample_limit>0 for the simplified example.
if cf.SAMPLE_LIMIT > 0:
initYear = currentYear
df = pd.DataFrame()
# Columns for the CVE records
ordered_cve_columns = ['cve_id', 'published_date', 'last_modified_date', 'description', 'nodes', 'severity',
'obtain_all_privilege', 'obtain_user_privilege', 'obtain_other_privilege',
'user_interaction_required',
'cvss2_vector_string', 'cvss2_access_vector', 'cvss2_access_complexity', 'cvss2_authentication',
'cvss2_confidentiality_impact', 'cvss2_integrity_impact', 'cvss2_availability_impact',
'cvss2_base_score',
'cvss3_vector_string', 'cvss3_attack_vector', 'cvss3_attack_complexity',
'cvss3_privileges_required',
'cvss3_user_interaction', 'cvss3_scope', 'cvss3_confidentiality_impact',
'cvss3_integrity_impact',
'cvss3_availability_impact', 'cvss3_base_score', 'cvss3_base_severity',
'exploitability_score', 'impact_score', 'ac_insuf_info',
'reference_json', 'problemtype_json']
# Columns for the CWE records
cwe_columns = ['cwe_id', 'cwe_name', 'description', 'extended_description', 'url', 'is_category']
# ---------------------------------------------------------------------------------------------------------------------
def rename_columns(name):
"""
    Converts column names to snake_case and applies further renaming rules.
"""
name = name.split('.', 2)[-1].replace('.', '_')
name = re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
name = name.replace('cvss_v', 'cvss').replace('_data', '_json').replace('description_json', 'description')
return name
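# Example: rename_columns('impact.baseMetricV2.cvssV2.baseScore') returns 'cvss2_base_score'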
def preprocess_jsons(df_in):
"""
Flattening CVE_Items and removing the duplicates
:param df_in: merged dataframe of all years json files
    Reference: JSON file structure:
"CVE_data_type" : "CVE",
"CVE_data_format" : "MITRE",
"CVE_data_version" : "4.0",
"CVE_data_numberOfCVEs" : "40",
"CVE_data_timestamp" : "2022-01-08T08:00Z",
"CVE_Items" : [略]
"""
    # Log that we are flattening CVE_Items and removing duplicates
cf.logger.info('Flattening CVE_Items and removing the duplicates...')
    # Extract only CVE_Items and discard the other top-level fields
    cve_items = json_normalize(df_in['CVE_Items'])
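    # json_normalize flattens each nested CVE_Items record into dotted column names
    # (e.g. 'cve.CVE_data_meta.ID'), which the rename_columns helper above is designed
    # to convert to snake_case.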
import numpy as np
import pandas as pd
from analysis.transform_fast import load_raw_cohort, transform
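# `gt` and `gte` comparison helpers are used throughout the tests below but are not
# defined in this excerpt; a minimal sketch is given here. The NULL handling is an
# assumption: a missing left-hand date never satisfies the comparison, while a missing
# right-hand date always does.
def gt(lhs, rhs):
    if pd.isna(lhs):
        return False
    if pd.isna(rhs):
        return True
    return lhs > rhs


def gte(lhs, rhs):
    if pd.isna(lhs):
        return False
    if pd.isna(rhs):
        return True
    return lhs >= rhs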
def test_immuno_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMRX_DAT <> NULL | Select | Next
if pd.notnull(row["immrx_dat"]):
assert row["immuno_group"]
continue
# IF IMMDX_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["immdx_cov_dat"]):
assert row["immuno_group"]
else:
assert not row["immuno_group"]
def test_ckd_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CKD_COV_DAT <> NULL (diagnoses) | Select | Next
if pd.notnull(row["ckd_cov_dat"]):
assert row["ckd_group"]
continue
# IF CKD15_DAT = NULL (No stages) | Reject | Next
if pd.isnull(row["ckd15_dat"]):
assert not row["ckd_group"]
continue
# IF CKD35_DAT>=CKD15_DAT | Select | Reject
if gte(row["ckd35_dat"], row["ckd15_dat"]):
assert row["ckd_group"]
else:
assert not row["ckd_group"]
def test_ast_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF ASTADM_DAT <> NULL | Select | Next
if pd.notnull(row["astadm_dat"]):
assert row["ast_group"]
continue
# IF AST_DAT <> NULL | Next | Reject
if pd.isnull(row["ast_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM1 <> NULL | Next | Reject
if pd.isnull(row["astrxm1_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM2 <> NULL | Next | Reject
if pd.isnull(row["astrxm2_dat"]):
assert not row["ast_group"]
continue
# IF ASTRXM3 <> NULL | Select | Reject
if pd.notnull(row["astrxm3_dat"]):
assert row["ast_group"]
else:
assert not row["ast_group"]
def test_cns_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF CNS_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["cns_cov_dat"]):
assert row["cns_group"]
else:
assert not row["cns_group"]
def test_resp_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF AST_GROUP <> NULL | Select | Next
if row["ast_group"]:
assert row["resp_group"]
continue
# IF RESP_COV_DAT <> NULL | Select | Reject
if pd.notnull(row["resp_cov_dat"]):
assert row["resp_group"]
else:
assert not row["resp_group"]
def test_bmi_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_OBESITY_DAT > BMI_DAT | Select | Next
if gt(row["sev_obesity_dat"], row["bmi_dat"]):
assert row["bmi_group"]
continue
# IF BMI_VAL >=40 | Select | Reject
if gte(row["bmi_val"], 40):
assert row["bmi_group"]
else:
assert not row["bmi_group"]
def test_diab_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF DIAB_DAT > DMRES_DAT | Select | Reject
if gt(row["diab_dat"], row["dmres_dat"]):
assert row["diab_group"]
else:
assert not row["diab_group"]
def test_sevment_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF SEV_MENTAL_DAT > SMHRES_DAT | Select | Reject
if gt(row["sev_mental_dat"], row["smhres_dat"]):
assert row["sevment_group"]
else:
assert not row["sevment_group"]
def test_atrisk_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF IMMUNOGROUP <> NULL | Select | Next
if row["immuno_group"]:
assert row["atrisk_group"]
continue
# IF CKD_GROUP <> NULL | Select | Next
if row["ckd_group"]:
assert row["atrisk_group"]
continue
# IF RESP_GROUP <> NULL | Select | Next
if row["resp_group"]:
assert row["atrisk_group"]
continue
# IF DIAB_GROUP <> NULL | Select | Next
if row["diab_group"]:
assert row["atrisk_group"]
continue
# IF CLD_DAT <>NULL | Select | Next
if pd.notnull(row["cld_dat"]):
assert row["atrisk_group"]
continue
# IF CNS_GROUP <> NULL | Select | Next
if row["cns_group"]:
assert row["atrisk_group"]
continue
# IF CHD_COV_DAT <> NULL | Select | Next
if pd.notnull(row["chd_cov_dat"]):
assert row["atrisk_group"]
continue
# IF SPLN_COV_DAT <> NULL | Select | Next
if pd.notnull(row["spln_cov_dat"]):
assert row["atrisk_group"]
continue
# IF LEARNDIS_DAT <> NULL | Select | Next
if pd.notnull(row["learndis_dat"]):
assert row["atrisk_group"]
continue
# IF SEVMENT_GROUP <> NULL | Select | Reject
if row["sevment_group"]:
assert row["atrisk_group"]
else:
assert not row["atrisk_group"]
def test_covax1d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVRX1_DAT <> NULL | Select | Next
if pd.notnull(row["covrx1_dat"]):
assert row["covax1d_group"]
continue
# IF COVADM1_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm1_dat"]):
assert row["covax1d_group"]
else:
assert not row["covax1d_group"]
def test_covax2d_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["covax2d_group"]
continue
# IF COVRX2_DAT <> NULL | Select | Next
if pd.notnull(row["covrx2_dat"]):
assert row["covax2d_group"]
continue
# IF COVADM2_DAT <> NULL | Select | Reject
if pd.notnull(row["covadm2_dat"]):
assert row["covax2d_group"]
else:
assert not row["covax2d_group"]
def test_unstatvacc1_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX1D_GROUP <> NULL | Next | Reject
if not row["covax1d_group"]:
assert not row["unstatvacc1_group"]
continue
# IF AZD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF PFD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF MOD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF NXD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF JND1RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF GSD1RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd1rx_dat"]):
assert not row["unstatvacc1_group"]
continue
# IF VLD1RX_DAT <> NULL | Reject | Select
if pd.notnull(row["vld1rx_dat"]):
assert not row["unstatvacc1_group"]
else:
assert row["unstatvacc1_group"]
def test_unstatvacc2_group():
raw_cohort = load_raw_cohort("tests/input.csv")
cohort = transform(raw_cohort)
for ix, row in cohort.iterrows():
# IF COVAX2D_GROUP <> NULL | Next | Reject
if not row["covax2d_group"]:
assert not row["unstatvacc2_group"]
continue
# IF AZD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["azd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF PFD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["pfd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF MOD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["mod2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF NXD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["nxd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF JND2RX _DAT <> NULL | Reject | Next
if pd.notnull(row["jnd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF GSD2RX_DAT <> NULL | Reject | Next
if pd.notnull(row["gsd2rx_dat"]):
assert not row["unstatvacc2_group"]
continue
# IF VLD2RX_DAT <> NULL | Reject | Select
if | pd.notnull(row["vld2rx_dat"]) | pandas.notnull |
"""Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
# TODO: datedate.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
assert as_json_table_type(t) == 'duration'
def test_as_json_table_type_string_dtypes(self):
strings = [object] # TODO
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_dtypes(self):
# TODO: I think before is_categorical_dtype(Categorical)
# returned True, but now it's False. Figure out why or
# if it matters
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(CategoricalDtype()) == 'any'
class TestTableOrient(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])),
'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True)),
'G': [1., 2., 3, 4.],
'H': pd.date_range('2016-01-01', freq='d', periods=4,
tz='US/Central'),
},
index=pd.Index(range(4), name='idx'))
def test_build_series(self):
s = pd.Series([1, 2], name='a')
s.index.name = 'id'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
{'name': 'a', 'type': 'integer'}]
schema = {
'fields': fields,
'primaryKey': ['id'],
}
expected = OrderedDict([
('schema', schema),
('data', [OrderedDict([('id', 0), ('a', 1)]),
OrderedDict([('id', 1), ('a', 2)])])])
assert result == expected
def test_to_json(self):
df = self.df.copy()
df.index.name = 'idx'
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'E',
'ordered': False,
'type': 'any'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'F',
'ordered': True,
'type': 'any'},
{'name': 'G', 'type': 'number'},
{'name': 'H', 'type': 'datetime', 'tz': 'US/Central'}
]
schema = {
'fields': fields,
'primaryKey': ['idx'],
}
data = [
OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
('C', '2016-01-01T00:00:00.000Z'),
('D', 'P0DT1H0M0S'),
('E', 'a'), ('F', 'a'), ('G', 1.),
('H', '2016-01-01T06:00:00.000Z')
]),
OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
('C', '2016-01-02T00:00:00.000Z'),
('D', 'P0DT1H1M0S'),
('E', 'b'), ('F', 'b'), ('G', 2.),
('H', '2016-01-02T06:00:00.000Z')
]),
OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
('C', '2016-01-03T00:00:00.000Z'),
('D', 'P0DT1H2M0S'),
('E', 'c'), ('F', 'c'), ('G', 3.),
('H', '2016-01-03T06:00:00.000Z')
]),
OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
('C', '2016-01-04T00:00:00.000Z'),
('D', 'P0DT1H3M0S'),
('E', 'c'), ('F', 'c'), ('G', 4.),
('H', '2016-01-04T06:00:00.000Z')
]),
]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1., 2.])
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
('data', [OrderedDict([('index', 1.0), ('values', 1)]),
OrderedDict([('index', 2.0), ('values', 1)])])])
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range('2016', freq='Q-JAN', periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'},
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
('values', 1)]),
OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
('values', 1)])]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(['a', 'b']))
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema',
{'fields': [{'name': 'index', 'type': 'any',
'constraints': {'enum': ['a', 'b']},
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
('data', [
OrderedDict([('index', 'a'),
('values', 1)]),
OrderedDict([('index', 'b'), ('values', 1)])])])
)
assert result == expected
def test_date_format_raises(self):
with pytest.raises(ValueError):
self.df.to_json(orient='table', date_format='epoch')
# others work
self.df.to_json(orient='table', date_format='iso')
self.df.to_json(orient='table')
def test_make_field_int(self):
data = [1, 2, 3]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
            result = make_field(kind)
            expected = {"name": "name", "type": "integer"}
            assert result == expected
import numpy as np
import pytest
from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range
data = {
"A": [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
"C": np.arange(10, dtype=np.float64),
"D": [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan],
}
dates = bdate_range("1/1/2011", periods=10)
# fixture names must be compatible with the tests in
# tests/frame/test_api.SharedWithSparse
@pytest.fixture
def float_frame_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
return DataFrame(data, index=dates)
@pytest.fixture
def float_frame():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
# default_kind='block' is the default
return SparseDataFrame(data, index=dates, default_kind="block")
@pytest.fixture
def float_frame_int_kind():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
Some entries are missing.
"""
return SparseDataFrame(data, index=dates, default_kind="integer")
@pytest.fixture
def float_string_frame():
"""
Fixture for sparse DataFrame of floats and strings with DatetimeIndex
Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
"""
    sdf = SparseDataFrame(data, index=dates)
    sdf["foo"] = SparseArray(["bar"] * len(dates))
    return sdf
"""Module and script to combine IDs with molreports to form graphs and masks.
This module provides functions and a script to extract bond and atom identifier
information, combine these IDs with bonds from the molreport file, and output:
1) An atom-level node list
2) An atom-level covalent bond list
3) A mask of atoms that have been added to the graph, but are not present in the
structure
"""
import ntpath
import sys
import pandas as pd
import networkx as nx
# molreport extraction
def extract_molreport(filepath, strip_h=False):
"""
Extract relevant information from molreport file type.
Args:
- filepath (str) - Path to molreport file.
- strip_h (bool) - Whether to strip hydrogens from molreport file
(default=False).
Returns:
- Tuple of Pandas dataframes, one for atom information and one for bond
information.
"""
# Dict to hold the atom information
atom_info = {
'atom': [],
'element': [],
'type': [],
'hyb': [],
'charge': []
}
# Dict to hold the bond information
bond_info = {
'start': [],
'end': [],
'order': []
}
# Dict to hold the element identities
elements = {}
# Open the molreport file
with open(filepath) as molreport:
# Read the file
for line in molreport.readlines():
# Handle atoms case
if line.startswith('ATOM'):
# Split the line
splitline = line.strip().split()
# Extract relevant information for each atom, respecting
# hydrogen stripping parameters
if (splitline[2] != 'H' and strip_h) or not strip_h:
atom_info['atom'].append(int(splitline[1]))
atom_info['element'].append(splitline[2])
atom_info['type'].append(splitline[4])
atom_info['hyb'].append(int(splitline[6]))
atom_info['charge'].append(float(splitline[8]))
# Get the element identity
elements[int(splitline[1])] = splitline[2]
# Handle bonds case
elif line.startswith('BOND'):
# Split the line
splitline = line.strip().split()
# Get the bond start and end points
bond_start = int(splitline[3])
bond_end = int(splitline[5])
# Whether bond includes hydrogen
not_h = (elements[bond_start] != 'H' and
elements[bond_end] != 'H')
                # Extract relevant information for each bond, respecting
# hydrogen stripping parameters
if (not_h and strip_h) or not strip_h:
# Extract bond info, with correct ordering
if bond_start < bond_end:
bond_info['start'].append(bond_start)
bond_info['end'].append(bond_end)
else:
bond_info['start'].append(bond_end)
bond_info['end'].append(bond_start)
# Extract bond order (e.g., single, double, etc.)
bond_info['order'].append(splitline[7])
# Return a data frame of the relevant info
atom_info = pd.DataFrame(atom_info)
atom_info['element'] = atom_info['element'].apply(lambda x: x.title())
bond_info = pd.DataFrame(bond_info)
return (atom_info, bond_info)
def extract_ids(filepath):
"""
    Extract atom identifying attributes from file.
Args:
- filepath (str) - Path to ID file.
Returns:
- Pandas DataFrame of the atom name, identifier, and element.
"""
ids = pd.read_table(filepath, names=['atom', 'identifier', 'element'],
keep_default_na=False)
ids['element'] = ids['element'].apply(lambda x: x.title())
return ids
def merge_molreport_ids(molreport_atoms, molreport_bonds, ids):
"""Merge molreport with ID information.
Merges ID information (chain, residues, atom, etc.) with molreport
information including bonds.
Args:
- molreport_atoms (pd.DataFrame) - Pandas DataFrame containing all atom
information from the molreport.
- molreport_bonds (pd.DataFrame) - Pandas DataFrame containing all bond
information from the molreport.
    - ids (pd.DataFrame) - Pandas DataFrame containing identifying information
      for each individual atom, to be joined onto the less descriptive
      molreport identifiers.
Returns:
    - Tuple of Pandas DataFrames (atoms and bonds, respectively) with merged ID
information for each atom.
"""
# Handle atoms file
atom_out = (
pd.merge(molreport_atoms, ids, on=['atom', 'element'])
.drop('atom', axis=1)
.rename(columns={'identifier': 'atom'})
)
atom_out = atom_out[['atom', 'element', 'type', 'hyb', 'charge']]
# Handle bonds
start_merge = (
pd.merge(molreport_bonds,
ids[['atom', 'identifier']],
left_on='start',
right_on='atom')
.drop(['start', 'atom'], axis=1)
.rename(columns={'identifier': 'start'})
)
end_merge = (
pd.merge(start_merge,
ids[['atom', 'identifier']],
left_on='end',
right_on='atom')
.drop(['end', 'atom'], axis=1)
.rename(columns={'identifier': 'end'})
)
bond_out = end_merge[['start', 'end', 'order']]
return (atom_out, bond_out)
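# Illustrative usage sketch: the file paths below are hypothetical placeholders,
# and the real entry point for this module lives in its script section.
def _example_merge_pipeline():
    """Minimal sketch of how the extraction and merge steps fit together."""
    # Parse the molreport into atom- and bond-level tables, dropping hydrogens.
    atoms, bonds = extract_molreport('example.molreport', strip_h=True)
    # Load the matching identifier table for the same structure.
    ids = extract_ids('example.ids.tsv')
    # Replace the numeric molreport atom indices with the full identifiers.
    return merge_molreport_ids(atoms, bonds, ids)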
def strip_hydrogen(atoms, bonds):
"""
Remove hydrogens from the atom and bond tables.
"""
atoms = atoms[atoms['element'] != 'H']
bonds = bonds[bonds['start'].isin(atoms['atom']) &
bonds['end'].isin(atoms['atom'])]
return (atoms, bonds)
def augment_bonds(bonds):
"""
Split bond identifiers into component columns.
"""
start_info = (
bonds['start'].str.split(':', expand=True)
.rename(columns={0: 'start_chain',
1: 'start_res',
2: 'start_num',
3: 'start_atom'})
)
end_info = (
bonds['end'].str.split(':', expand=True)
.rename(columns={0: 'end_chain',
1: 'end_res',
2: 'end_num',
3: 'end_atom'})
)
bonds = pd.concat([bonds, start_info, end_info], axis=1)
bonds['start_num'] = bonds['start_num'].astype(int)
bonds['end_num'] = bonds['end_num'].astype(int)
return bonds
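# Note: the augment helpers (augment_bonds above, augment_atoms below) assume the
# merged identifiers use the colon-separated 'chain:res:num:atom_name' layout
# implied by the column renames; e.g. a hypothetical 'A:ALA:42:CA' would split
# into chain 'A', residue 'ALA', residue number 42 and atom name 'CA'.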
def augment_atoms(atoms):
"""
Split atom identifiers into component columns.
"""
atoms_info = (
atoms['atom'].str.split(':', expand=True)
.rename(columns={0: 'chain',
1: 'res',
2: 'num',
3: 'atom_name'})
)
atoms = pd.concat([atoms, atoms_info], axis=1)
atoms['num'] = atoms['num'].astype(int)
return atoms
def identify_gaps(chain_atoms):
"""
Identify gaps in chain of atoms.
"""
min_num = chain_atoms['num'].min()
max_num = chain_atoms['num'].max()
present = []
absent = []
breakpoints = []
unique_idxs = chain_atoms['num'].unique()
for i in range(min_num, max_num + 1):
if i in unique_idxs:
present.append(i)
term = i in (min_num, max_num)
            # Check membership against the values (the unique residue numbers),
            # not the Series index, which is what `in` on a Series would test.
            up_break = i + 1 not in unique_idxs
            down_break = i - 1 not in unique_idxs
breakpoint = not term and (up_break or down_break)
if breakpoint:
breakpoints.append(i)
else:
absent.append(i)
return (present, absent, breakpoints)
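# Worked example: for a chain whose residue numbers are [1, 2, 3, 6, 7], the
# function returns present=[1, 2, 3, 6, 7], absent=[4, 5] and breakpoints=[3, 6],
# since 3 and 6 border the missing 4-5 stretch while 1 and 7 are chain termini.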
def patch_gaps(chain, seq, absent, breakpoints):
"""
Patch gaps in a chain.
"""
# Extract information
chain_atoms, chain_bonds = chain
seq_atoms, seq_bonds = seq
# Initialize a list for the missing atoms
all_missing = []
# Get chain ID
chain = chain_atoms['chain'].unique()[0]
# Get missing atoms and bonds
missing_atoms = seq_atoms[(seq_atoms['chain'] == chain) &
(seq_atoms['num'].isin(absent)) &
(~seq_atoms['atom'].isin(chain_atoms['atom']))]
missing_bonds = seq_bonds[seq_bonds['start'].isin(missing_atoms['atom']) |
seq_bonds['end'].isin(missing_atoms['atom'])]
chain_atoms = | pd.concat([chain_atoms, missing_atoms]) | pandas.concat |
#!/home/ubuntu/anaconda3/bin//python
'''
MIT License
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The code is inspired by https://github.com/erikor/medline project, but the logic to
parse medline XML was substantially modified.
'''
# pre-requisites: pip install elasticsearch
# pip install --upgrade pip
# to execute this code:
# STEP 0: ensure elastic search and kibana are running on port 9200
# and 5601 correspondingly
# STEP 1: make sure you have all the medline XML files downloaded from
# STEP 2: then you run nohup ls *.xml | xargs -n 1 -P 4 python ./parseMedline.py &
# the above step assume quad-core processor, and runs it as daemon process so when
# you exit SSH session, it runs in background.
# this should load the data into elastic search
import pandas as pd
import glob
import sys
list_descr = []
list_speech = []
list_speakermap = []
descr_filenames = glob.glob("." + "/descr*.txt")
speech_filenames = glob.glob("." + "/speech*.txt")
speakermap_filenames = glob.glob("." + "/*SpeakerMap.txt")
for filename in descr_filenames:
try:
list_descr.append(pd.read_csv(filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
except:
print("Error reading description file = ", filename)
for filename in speech_filenames:
try:
list_speech.append(pd.read_csv(filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
except:
print("Error reading speech file = ", filename)
for filename in speakermap_filenames:
try:
list_speakermap.append(pd.read_csv(filename, sep="|", error_bad_lines=False, header = 0, encoding='ISO-8859-1'))
except:
print("Error reading speakermap file = ", filename)
df_descr = pd.concat(list_descr)
df_speech = pd.concat(list_speech)
df_speakermap = pd.concat(list_speakermap)
list_descr = None
list_speech = None
list_speakermap = None
df_descr_speech_speakermap = pd.merge(df_descr, df_speech, on='speech_id')
df_descr_speech_speakermap = pd.merge(df_descr_speech_speakermap, df_speakermap, on=['speech_id'])
df_descr = None
df_speech = None
df_speakermap = None
# convert date
df_descr_speech_speakermap['date'] = | pd.to_datetime(df_descr_speech_speakermap['date'], format='%Y%m%d') | pandas.to_datetime |
import sys
import logging
import pandas as pd
import pytz
import bt
try:
from . import module_loader
except:
import module_loader
sys.dont_write_bytecode = True
class AlgoRunner(object):
def __init__(self, stock_data_provider, capital_base, parameters):
self.stock_data_provider_ = stock_data_provider
self.load_data_ = module_loader.load_module_func(stock_data_provider,
'load_stock_data')
self.capital_base_ = capital_base
self.parameters_ = parameters
def __create_pd_panel(self, all_data):
trading_data = {}
for data in all_data:
trading_data[data.stock_id] = data.data_frame['close']
panel = pd.DataFrame(data=trading_data)
return panel
def ensure_stock_data(self, symbols):
for symbol in symbols:
self.load_data_(symbol)
def run(self, algo, symbols, start_date=None, end_date=None, analyze_func=None):
data = []
for symbol in symbols:
data.append(self.load_data_(symbol))
if start_date:
start_date = pd.to_datetime(start_date)
if end_date:
end_date = | pd.to_datetime(end_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:08:35 2019
@author: Team BTC - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Sorry, the code isn't very efficient. Because of time constraints and the number of people working on the project, we couldn't do all the automation we would have liked to do.
# Code in block comments should not be run, as it will make changes to the cloud database.
# %% Importing libraries
# You may need to install dnspython in order to work with cloud server
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime as dt
import os
import time
import re
import copy
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from datetime import timedelta
from pymongo import MongoClient
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
#os.chdir('H:/Documents/Alternance/Project/')
# %% Function to scrape data from StockTwits and add it to the cloud server
# The function has two inputs:
# - Symbol of the asset, as a string
# - Rate limit: number of requests per execution, as an integer
def get_stwits_data(symbol,rate_limit):
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
exist=0
for q in db['{}'.format(symbol)].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
exist=1
min_prev_id=q['min']
http = urllib3.PoolManager()
mid=[]
duplicates=0
for j in tqdm(range(rate_limit)):
if exist==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json".format(symbol)
elif exist!=0 and len(mid)==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_prev_id)
else:
min_ID=min(mid)
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_ID)
r = http.request('GET', url)
try:
data = json.loads(r.data)
except:
print('Decode error, retry again')
continue
if duplicates==1:
            print('\nThere are duplicates in the result. Someone else may be running the scraper at the same time. \nPlease try again later.')
break
if data["response"]["status"] != 200:
print("\nYour request was denied, retry in 1 hour")
time.sleep(3600)
continue
# insert_element=[]
# break
for element in data["messages"]:
mid.append(element["id"])
symbol_list=[]
for s in element['symbols']:
symbol_list.append(s['symbol'])
try:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": (element["entities"]["sentiment"]["basic"]=="Bullish")*2-1,'Symbols':symbol_list}
except:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": 0,'Symbols':symbol_list}
try:
result = db['{}'.format(symbol)].insert_one(insert_element)
except:
duplicates=1
break
return insert_element
# %% Execution of the function
symbol='BTC.X'
rate_limit=2000
last_ele=get_stwits_data(symbol,rate_limit)
# %% Creating custom lexicon
#%% Finding the time interval of the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
#Getting the minimum id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
minID=q['min']
#Getting the timestamp from the min ID
for post in db['BTC.X'].find({'ID':minID}):
start_time=post['TimeStamp']
#Getting the max id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"max": { "$max": "$ID" }
}}
]):
maxID=q['max']
#Getting the timestamp from the max ID
for post in db['BTC.X'].find({'ID':maxID}):
end_time=post['TimeStamp']
start_time=dt.strptime(start_time,'%Y-%m-%dT%H:%M:%SZ')
end_time=dt.strptime(end_time,'%Y-%m-%dT%H:%M:%SZ')
period=np.arange(dt(start_time.year,start_time.month,start_time.day),dt(end_time.year,end_time.month,end_time.day),timedelta(days=1))
#%% Creating dictionary
# Create helper functions that count word frequencies in bullish (positive) and bearish (negative) posts for a given day
def create_positive_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
def create_negative_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=-1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
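# Note: the two builders above differ only in the hard-coded `sentimental` flag
# (+1 for bullish, -1 for bearish); a single helper taking (day, sentiment) would
# avoid the duplication, but they are kept separate so each can be mapped
# directly over the multiprocessing pool below.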
from multiprocessing import Pool
pool = Pool()
#creating positive dictionary
df=list(tqdm(pool.imap(create_positive_dictionary_by_day, period), total=len(period)))
positive_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
positive_dictionary=positive_dictionary.add(df[i].set_index('Word'), fill_value=0)
#creating negative dictionary
df=list(tqdm(pool.imap(create_negative_dictionary_by_day, period), total=len(period)))
negative_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
negative_dictionary=negative_dictionary.add(df[i].set_index('Word'), fill_value=0)
negative_dictionary=negative_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary=positive_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary.columns=['Positive Freq']
negative_dictionary.columns=['Negative Freq']
positive_dictionary=positive_dictionary/db['BTC.X'].count_documents({'Sentiment':1})
negative_dictionary=negative_dictionary/db['BTC.X'].count_documents({'Sentiment':-1})
# Combining both dictionaries
final_dict=positive_dictionary.add(negative_dictionary, fill_value=0).sort_values('Positive Freq',ascending=False)
final_dict['Pos over Neg']=final_dict['Positive Freq']/final_dict['Negative Freq']
#Removing stopwords from the dictionary
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
final_dict=final_dict.reset_index()
for i in final_dict['Word']:
if i in stop_words:
final_dict=final_dict[final_dict['Word']!=i]
#Removing words below the threshold
final_dic=final_dict.fillna(value=0)
final_dict=final_dict[(final_dict['Negative Freq']>0.0005) | (final_dict['Positive Freq']>0.0005)]
final_dict.fillna(value=0).sort_values('Pos over Neg',ascending=False).to_csv('Simple_Dictionary2.csv')
#%% Creating positive and negative word list from the lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('Simple_Dictionary2.csv')
lexicon=lexicon[['Word','Classification']]
neg_list=list(lexicon[lexicon['Classification']==-1]['Word'])
pos_list=list(lexicon[lexicon['Classification']==1]['Word'])
# Update lexicon result to the database
import nltk
porter = nltk.PorterStemmer()
import re
import copy
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
for i in range(32):
for documents in tqdm(db['BTC.X'].find({'Custom_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)):
if documents['Sentiment']==0:
score=0
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Custom_Lexicon_Sentiment':documents['Sentiment']}})
#%% Creating positive and negative word list from the teacher lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=pd.read_csv('l2_lexicon.csv',sep=';')
neg_list=list(lexicon[lexicon['sentiment']=='negative']['keyword'])
pos_list=list(lexicon[lexicon['sentiment']=='positive']['keyword'])
# Update lexicon result to the database
pattern = r'''(?x) # set flag to allow verbose regexps
(?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
| \w+(?:-\w+)* # words with optional internal hyphens
| \$?\w+(?:\.\w+)?%? # tickers
| \@?\w+(?:\.\w+)?%? # users
| \.\.\. # ellipsis
| [][.,;"'?!():_`-] # these are separate tokens; includes ], [
'''
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
cursor=db['BTC.X'].find({'Prof_Lexicon_Sentiment':{ "$exists" : False }},limit=10000)
for i in range(32):
for documents in tqdm(cursor):
if documents['Sentiment']==0:
score=0
word_list=nltk.regexp_tokenize(documents['Content'], pattern)
# word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
# word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in neg_list:
score+=-1
if word in pos_list:
score+=1
if score >0:
senti=1
elif score <0:
senti=-1
else:
senti=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':senti}})
else:
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Prof_Lexicon_Sentiment':documents['Sentiment']}})
#%% Adding Vader analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying Vader
analyser = SentimentIntensityAnalyzer()
for i in tqdm(range(31)):
for documents in collection.find({'Vader_sentiment2':{ "$exists" : False }},limit=10000):
doc_id = documents['_id']
Vaderdoc = analyser.polarity_scores(documents['Content'])
Vaderdoc= Vaderdoc.get('compound')
if Vaderdoc> 0.33:
Sentiment_vader=1
elif Vaderdoc< -0.33:
Sentiment_vader=-1
else:
Sentiment_vader=0
print (Sentiment_vader)
#Insert Vader value to the database
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment2':Sentiment_vader}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Vader_sentiment':Vaderdoc}})
#%% Adding Textblob analysis value to the database
# Connecting to the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
collection= db['BTC.X']
# Applying TextBlob (note: the Vader analyser created below is not used in this cell)
analyser = SentimentIntensityAnalyzer()
#Vader=[] 54452
for i in tqdm(range(31)):
for documents in collection.find({'Textblob_Sentiment2':{'$exists':False}},limit=10000):
doc_id = documents['_id']
pola = TextBlob(documents['Content']).sentiment.polarity
# Vader.append(Vaderdoc)
if pola> 0.33:
Sentiment_txt=1
elif pola< -0.33:
Sentiment_txt=-1
else:
Sentiment_txt=0
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment2':Sentiment_txt}})
db['BTC.X'].update_one({'_id':documents['_id']},{'$set':{'Textblob_Sentiment':pola}})
#%% Econometric testing
#%% Import BTC price time series
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
price=[]
for documents in db['BTC.Price'].find({}):
price.append([documents['Time'],documents['Price']])
price=pd.DataFrame(price,columns=['Time','Price'])
price['Time']=pd.to_datetime(price['Time'])
price=price.set_index('Time')
price=price[price.index<=dt(2019,9,21,14)]
plt.figure()
price.plot()
price['r_btc'] = (price.Price - price.Price.shift(1)) / price.Price.shift(1)
#%% Import all sentiment time series
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=[]
for documents in tqdm(db['BTC'].find({})):
sentimental.append([documents['TimeStamp'],documents['Custom_Lexicon_Sentiment'],documents['Prof_Lexicon_Sentiment'],documents['Textblob_Sentiment'],documents['Textblob_Sentiment2'],documents['Vader_sentiment'],documents['Vader_sentiment2'],documents['Sentiment']])
sentimental=pd.DataFrame(sentimental,columns=['Time','Custom_Lexicon_Sentiment','Prof_Lexicon_Sentiment','Textblob_Sentiment_prob','Textblob_Sentiment_binary','Vader_sentiment_prob','Vader_sentiment_binary','Origin_sentiment'])
sentimental=sentimental.set_index('Time')
sentimental.index=pd.to_datetime(sentimental.index.tz_localize(None))
# Resample time series into hour
sentiment_1h=sentimental.resample('1H').mean()
sentiment_1h.plot()
sentiment_1h=sentiment_1h[sentiment_1h.index > dt(2019,1,1) ]
# Export the time series to database
for i in tqdm(range(len(sentiment_1h))):
insert_element = {"Time": sentiment_1h.index[i], "{}".format(sentiment_1h.columns[0]): sentiment_1h["{}".format(sentiment_1h.columns[0])][i],"{}".format(sentiment_1h.columns[1]): sentiment_1h["{}".format(sentiment_1h.columns[1])][i], "{}".format(sentiment_1h.columns[2]): sentiment_1h["{}".format(sentiment_1h.columns[2])][i], "{}".format(sentiment_1h.columns[3]): sentiment_1h["{}".format(sentiment_1h.columns[3])][i], "{}".format(sentiment_1h.columns[4]): sentiment_1h["{}".format(sentiment_1h.columns[4])][i], "{}".format(sentiment_1h.columns[5]): sentiment_1h["{}".format(sentiment_1h.columns[5])][i], "{}".format(sentiment_1h.columns[6]): sentiment_1h["{}".format(sentiment_1h.columns[6])][i]}
result = db['Time_series_Data'].insert_one(insert_element)
#
sentiment_1h=[]
for documents in tqdm(db['Time_series_Data'].find({})):
sentiment_1h.append([documents['Time'],documents['Custom_Lexicon_Sentiment'],documents['Prof_Lexicon_Sentiment'],documents['Textblob_Sentiment_prob'],documents['Textblob_Sentiment_binary'],documents['Vader_sentiment_prob'],documents['Vader_sentiment_binary'],documents['Origin_sentiment']])
sentiment_1h=pd.DataFrame(sentiment_1h,columns=['Time','Custom_Lexicon_Sentiment','Prof_Lexicon_Sentiment','Textblob_Sentiment_prob','Textblob_Sentiment_binary','Vader_sentiment_prob','Vader_sentiment_binary','Origin_sentiment'])
sentiment_1h=sentiment_1h.set_index('Time')
sentiment_1h.index=pd.to_datetime(sentiment_1h.index.tz_localize(None))
#%% Correlation Matrix
test_data=pd.concat([price,sentiment_1h],axis=1)
test_data=test_data.fillna(value=0)
corr_matrix=test_data.corr()
#==============================================================================
#%%Time series analysis for custom lexicon and professor's lexicon
#analyse each timeseries by plotting them
sentiment_1h=sentiment_1h.dropna()
sentiprof=sentiment_1h.iloc[:,1]
senticustom=sentiment_1h.iloc[:,0]
sentiprof=sentiprof.dropna()
senticustom=senticustom.dropna()
# astype() returns a copy, so assign the result back
sentiprof = sentiprof.astype(float)
senticustom = senticustom.astype(float)
plt.figure()
btweet= sentiprof.plot(title='One hour average sentiment value(sentiprof)')
plt.figure()
btweetc=senticustom.plot(title='One hour average sentiment value2(senticustom)')
# From these graphs, we can see that the two sentiment series fluctuate but remain fairly stable.
sentiprof.mean()
senticustom.mean()
# The sentiprof series has a mean of 0.3615, which is lower than the senticustom mean of 0.44.
# Both series indicate a positive sentiment towards Bitcoin in the StockTwits posts since January 2019.
price = price.astype(float)  # assign back: astype() returns a copy
plt.figure()
priceg= price.Price.plot(title='Price of Bitcoin since Jan 2019(one hour)')
# This graph shows that the price of Bitcoin has an increasing trend from Jan 2019 to July 2019.
preturn=(price.Price-price.Price.shift(1))/price.Price.shift(1)
preturn=preturn.dropna()
preturn.mean()
plt.figure()
preturn.plot(title='Price return of Bitcoin since Jan 2019(one hour)')
# The graph of the price return shows some fluctuations, but the series looks fairly stable.
#%% Stationarity tests (unit root tests)
#<NAME>
adfuller(sentiprof,regression='ct')
adfuller(sentiprof,regression='nc')
# The p-value is small enough: at the 95% confidence level we can say there is no unit root in sentiprof, so the series is stationary.
#Custom Lexicon
adfuller(senticustom,regression='ct')
adfuller(senticustom,regression='nc')
## The p-value is low enough: at the 95% confidence level we can reject the null hypothesis that there is a unit root.
adfuller(price.Price,regression='ct')
## The p-value is high (0.83); as seen in the graph, the price has an obvious increasing trend since Jan 2019.
adfuller(preturn,regression='ct')
adfuller(preturn,regression='nc')
# The p-value is low enough to reject the null hypothesis, so there is no unit root in the Bitcoin price return.
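# For reference, the p-values quoted above are the second element of the tuple
# returned by adfuller(); a minimal sketch of reading them explicitly:
adf_stat, adf_pvalue = adfuller(preturn, regression='nc')[:2]
print('ADF statistic: {:.4f}, p-value: {:.4f}'.format(adf_stat, adf_pvalue))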
#%% Set the same datetime index and merge all the data together.
dates2 = pd.date_range('2018-12-22', '2019-09-24', freq='h')
ab=pd.DataFrame(index=dates2,data=sentiprof)
ad=pd.DataFrame(index=dates2,data=preturn)
ac=pd.DataFrame(index=dates2,data=senticustom)
btcdata= | pd.concat([ad,ab,ac],axis=1) | pandas.concat |
import warnings
import pandas as pd
import numpy as np
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
import torch
from pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer
from pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss
from config import load_config
warnings.filterwarnings("ignore")
spec = load_config("config.yaml")
BATCH_SIZE = spec["model_local"]["batch_size"]
MAX_EPOCHS = spec["model_local"]["max_epochs"]
GPUS = spec["model_local"]["gpus"]
LEARNING_RATE = spec["model_local"]["learning_rate"]
HIDDEN_SIZE = spec["model_local"]["hidden_size"]
DROPOUT = spec["model_local"]["dropout"]
HIDDEN_CONTINUOUS_SIZE = spec["model_local"]["hidden_continuous_size"]
GRADIENT_CLIP_VAL = spec["model_local"]["gradient_clip_val"]
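# The constants above assume a config.yaml shaped roughly as follows; the file
# itself is not shown here, so the values are illustrative only:
#
# model_local:
#   batch_size: 64
#   max_epochs: 30
#   gpus: 0
#   learning_rate: 0.03
#   hidden_size: 16
#   dropout: 0.1
#   hidden_continuous_size: 8
#   gradient_clip_val: 0.1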
data = | pd.read_csv("data/poc.csv") | pandas.read_csv |
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should be by import and implementation within an IDE, or by editing
section # C00 and running this script directly.
Primary member functions include descriptive docstrings for their intent and
use.
See license.txt for information related to each open-source library used.
WARNING: project file operations are based on relative pathing from the
'scripts' directory this Python script is located in, given the creation of
directories 'GDELTdata' and 'EDAlogs' parallel to 'scripts' upon first
GDELTbase and GDELTeda class initializations.
If those directories are not already present, a fallback method for
string-literal directory reorientation may be found in GDELTbase shared class
data at this tag: # A01a - backup path specification.
Any given user's project directory must be specified there.
See also GDELTeda.py, tag # A02b - Project directory path, as any given user's
project directory must be specified for that os.chdir() call, also.
Contents:
A00 - GDELTbase
A01 - shared class data (toolData, localDb)
A01a - backup path specification
Note: Specification at A01a should be changed to suit a user's desired
directory structure, given their local filesystem.
A02 - __init__ w/ instanced data (localFiles)
B00 - class methods
B01 - updateLocalFilesIndex
B02 - clearLocalFilesIndex
B03 - showLocalFiles
B04 - wipeLocalFiles
B05 - extensionToTableName
B06 - isFileDownloaded
B07 - downloadGDELTFile
B08 - downloadGDELTDay
B09 - cleanFile (includes the following field/subfield parser functions)
B09a - themeSplitter
B09b - locationsSplitter
B09c - personsSplitter
B09d - organizationsSplitter
B09e - toneSplitter
B09f - countSplitter
B09g - One-liner date conversion function for post-read_csv use
B09h - llConverter
B10 - cleanTable
B11 - mongoFile
B12 - mongoTable
C00 - main w/ testing
'''
import pandas as pd
import numpy as np
import os
import pymongo
import wget
import json
from time import time
from datetime import datetime, tzinfo
from zipfile import ZipFile as zf
from pprint import pprint as pp
from urllib.error import HTTPError
# A00
class GDELTbase:
'''Base object for GDELT data acquisition, cleaning, and storage.
Shared class data:
-----------------
toolData - dict with these key - value pairs:
URLbase - "http://data.gdeltproject.org/gdeltv2/"
path - os.path path objects, 'raw' and 'clean', per-table
names - lists of string column names, per-table, original and reduced
extensions - dict mapping table names to file extensions, per-table
columnTypes - dicts mapping table column names to appropriate types
localDb - dict with these key - value pairs:
client - pymongo.MongoClient()
database - pymongo.MongoClient().capstone
collections - dict mapping table names to suitable mongoDB collections
Instanced class data:
--------------------
localFiles - dict, per-table keys for lists of local 'raw' and 'clean'
filenames
Class methods:
-------------
updateLocalFilesIndex()
clearLocalFilesIndex()
showLocalFiles()
wipeLocalFiles()
extensionToTableName()
isFileDownloaded()
downloadGDELTFile()
downloadGDELTDay()
cleanFile()
cleanTable()
mongoFile()
mongoTable()
'''
# A01 - shared class data
toolData = {}
# A01a - backup path specification
# Failsafe path for local main project directory. Must be changed to suit
# location of any given end-user's 'script' directory in case directory
# 'GDELTdata' is not present one directory up.
toolData['projectPath'] = 'C:\\Users\\urf\\Projects\\WGU capstone'
# Controls generation of datafile download URLs in downloadGDELTDay()/File()
toolData['URLbase'] = "http://data.gdeltproject.org/gdeltv2/"
# Used in forming URLs for datafile download
toolData['extensions'] = {
'events' : "export.CSV.zip",
'gkg' : "gkg.csv.zip",
'mentions' : "mentions.CSV.zip",
}
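    # Example: downloadGDELTDay()/downloadGDELTFile() below combine URLbase, a
    # YYYYMMDDHHMMSS timestamp, and one of these extensions into a download URL
    # of the (assumed) form:
    #   http://data.gdeltproject.org/gdeltv2/20150218230000.export.CSV.zip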
# These paths are set relative to the location of this script, one directory
# up, in 'GDELTdata', parallel to the script directory.
toolData['path'] = {}
toolData['path']['base']= os.path.join(os.path.abspath(__file__),
os.path.realpath('..'),
'GDELTdata')
toolData['path']['events'] = {
'table': os.path.join(toolData['path']['base'], 'events'),
'raw': os.path.join(toolData['path']['base'], 'events', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'events', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'events',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'events',
'realtimeClean')
}
toolData['path']['gkg'] = {
'table': os.path.join(toolData['path']['base'], 'gkg'),
'raw': os.path.join(toolData['path']['base'], 'gkg', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'gkg', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'gkg',
'realtimeClean')
}
toolData['path']['mentions'] = {
'table': os.path.join(toolData['path']['base'], 'mentions'),
'raw': os.path.join(toolData['path']['base'], 'mentions', 'raw'),
'clean': os.path.join(toolData['path']['base'], 'mentions', 'clean'),
'realtimeR' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeRaw'),
'realtimeC' : os.path.join(toolData['path']['base'], 'mentions',
'realtimeClean')
}
# These mappings and lists are for recognition of all possible
# column names, and the specific discarding of a number of columns
# which have been predetermined as unnecessary in the context of
# simple EDA.
toolData['names'] = {}
toolData['names']['events'] = {
'original' : [
'GLOBALEVENTID',
'Day',
'MonthYear',
'Year',
'FractionDate',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1KnownGroupCode',
'Actor1EthnicCode',
'Actor1Religion1Code',
'Actor1Religion2Code',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2KnownGroupCode',
'Actor2EthnicCode',
'Actor2Religion1Code',
'Actor2Religion2Code',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'GoldsteinScale',
'NumMentions',
'NumSources',
'NumArticles',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_CountryCode',
'Actor1Geo_ADM1Code',
'Actor1Geo_ADM2Code',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor1Geo_FeatureID',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_CountryCode',
'Actor2Geo_ADM1Code',
'Actor2Geo_ADM2Code',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'Actor2Geo_FeatureID',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_CountryCode',
'ActionGeo_ADM1Code',
'ActionGeo_ADM2Code',
'ActionGeo_Lat',
'ActionGeo_Long',
'ActionGeo_FeatureID',
'DATEADDED',
'SOURCEURL',
],
'reduced' : [
'GLOBALEVENTID',
'Actor1Code',
'Actor1Name',
'Actor1CountryCode',
'Actor1Type1Code',
'Actor1Type2Code',
'Actor1Type3Code',
'Actor2Code',
'Actor2Name',
'Actor2CountryCode',
'Actor2Type1Code',
'Actor2Type2Code',
'Actor2Type3Code',
'IsRootEvent',
'EventCode',
'EventBaseCode',
'EventRootCode',
'QuadClass',
'AvgTone',
'Actor1Geo_Type',
'Actor1Geo_FullName',
'Actor1Geo_Lat',
'Actor1Geo_Long',
'Actor2Geo_Type',
'Actor2Geo_FullName',
'Actor2Geo_Lat',
'Actor2Geo_Long',
'ActionGeo_Type',
'ActionGeo_FullName',
'ActionGeo_Lat',
'ActionGeo_Long',
'DATEADDED',
'SOURCEURL',
],
}
toolData['names']['gkg'] = {
'original' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCollectionIdentifier',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V21Counts',
'V1Themes',
'V2EnhancedThemes',
'V1Locations',
'V2EnhancedLocations',
'V1Persons',
'V2EnhancedPersons',
'V1Organizations',
'V2EnhancedOrganizations',
'V15Tone',
'V21EnhancedDates',
'V2GCAM',
'V21SharingImage',
'V21RelatedImages',
'V21SocialImageEmbeds',
'V21SocialVideoEmbeds',
'V21Quotations',
'V21AllNames',
'V21Amounts',
'V21TranslationInfo',
'V2ExtrasXML',
],
'reduced' : [
'GKGRECORDID',
'V21DATE',
'V2SourceCommonName',
'V2DocumentIdentifier',
'V1Counts',
'V1Themes',
'V1Locations',
'V1Persons',
'V1Organizations',
'V15Tone',
],
}
toolData['names']['mentions'] = {
'original' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'SentenceID', #
'Actor1CharOffset',#
'Actor2CharOffset',#
'ActionCharOffset',#
'InRawText',
'Confidence',
'MentionDocLen', #
'MentionDocTone',
'MentionDocTranslationInfo', #
'Extras', #
],
'reduced' : [
'GLOBALEVENTID',
'EventTimeDate',
'MentionTimeDate',
'MentionType',
'MentionSourceName',
'MentionIdentifier',
'InRawText',
'Confidence',
'MentionDocTone',
],
}
# These mappings are used in automated dtype application to Pandas
# DataFrame collections of GDELT records, part of preprocessing.
toolData['columnTypes'] = {}
toolData['columnTypes']['events'] = {
'GLOBALEVENTID' : type(1),
'Actor1Code': pd.StringDtype(),
'Actor1Name': pd.StringDtype(),
'Actor1CountryCode': pd.StringDtype(),
'Actor1Type1Code' : pd.StringDtype(),
'Actor1Type2Code' : pd.StringDtype(),
'Actor1Type3Code' : | pd.StringDtype() | pandas.StringDtype |
import pandas as pd
import numpy as np
import knackpy as kp
import fulcrum as fc
import requests
import pdb
import json
from datetime import datetime, timedelta
from pypgrest import Postgrest
from tdutils import argutil
from config.secrets import *
form_id = "44359e32-1a7f-41bd-b53e-3ebc039bd21a"
key = FULCRUM_CRED.get("api_key")
# create postgrest instance
pgrest = Postgrest(
"http://transportation-data.austintexas.io/signal_pms", auth=JOB_DB_API_TOKEN
)
def get_pgrest_records():
"""Summary
Returns:
TYPE: Description
"""
    # the datetime conversion for modified_date is not right; the time part is missing
params = {}
results = pgrest.select(params=params)
if len(results) != 0:
results = | pd.DataFrame(results) | pandas.DataFrame |
"""
Module contains miscellaneous functions used for reading data, printing logo etc.
"""
import pickle
from random import sample
import networkx as nx
import pandas as pd
def read_testcase(FOLDER):
"""
Reads the GTFS network and preprocessed dict. If the dicts are not present, dict_builder_functions are called to construct them.
Returns:
stops_file (pandas.dataframe): stops.txt file in GTFS.
trips_file (pandas.dataframe): trips.txt file in GTFS.
stop_times_file (pandas.dataframe): stop_times.txt file in GTFS.
transfers_file (pandas.dataframe): dataframe with transfers (footpath) details.
stops_dict (dict): keys: route_id, values: list of stop id in the route_id. Format-> dict[route_id] = [stop_id]
stoptimes_dict (dict): keys: route ID, values: list of trips in the increasing order of start time. Format-> dict[route_ID] = [trip_1, trip_2] where trip_1 = [(stop id, arrival time), (stop id, arrival time)]
footpath_dict (dict): keys: from stop_id, values: list of tuples of form (to stop id, footpath duration). Format-> dict[stop_id]=[(stop_id, footpath_duration)]
route_by_stop_dict_new (dict): keys: stop_id, values: list of routes passing through the stop_id. Format-> dict[stop_id] = [route_id]
idx_by_route_stop_dict (dict): preprocessed dict. Format {(route id, stop id): stop index in route}.
"""
import gtfs_loader
from dict_builder import dict_builder_functions
stops_file, trips_file, stop_times_file, transfers_file = gtfs_loader.load_all_db(FOLDER)
try:
stops_dict, stoptimes_dict, footpath_dict, routes_by_stop_dict, idx_by_route_stop_dict = gtfs_loader.load_all_dict(FOLDER)
except FileNotFoundError:
stops_dict = dict_builder_functions.build_save_stops_dict(stop_times_file, trips_file, FOLDER)
stoptimes_dict = dict_builder_functions.build_save_stopstimes_dict(stop_times_file, trips_file, FOLDER)
routes_by_stop_dict = dict_builder_functions.build_save_route_by_stop(stop_times_file, FOLDER)
footpath_dict = dict_builder_functions.build_save_footpath_dict(transfers_file, FOLDER)
idx_by_route_stop_dict = dict_builder_functions.stop_idx_in_route(stop_times_file, FOLDER)
return stops_file, trips_file, stop_times_file, transfers_file, stops_dict, stoptimes_dict, footpath_dict, routes_by_stop_dict, idx_by_route_stop_dict
def print_logo():
"""
Prints the logo
"""
print("""
****************************************************************************************
* TRANSIT ROUTING ALGORITHMS *
* <NAME> <NAME> *
* (<EMAIL>) (<EMAIL>) *
****************************************************************************************
""")
return None
def print_network_details(transfers_file, trips_file, stops_file):
"""
    Prints the network details such as the number of routes, trips, stops, and footpaths
Args:
transfers_file (pandas.dataframe):
trips_file (pandas.dataframe):
stops_file (pandas.dataframe):
Returns: None
"""
print("___________________________Network Details__________________________")
print("| No. of Routes | No. of Trips | No. of Stops | No. of Footapths |")
print(
f"| {len(set(trips_file.route_id))} | {len(set(trips_file.trip_id))} | {len(set(stops_file.stop_id))} | {len(transfers_file)} |")
print("____________________________________________________________________")
return None
def print_query_parameters(SOURCE, DESTINATION, D_TIME, MAX_TRANSFER, WALKING_FROM_SOURCE, variant, no_of_partitions=None,
weighting_scheme=None, partitioning_algorithm=None):
"""
Prints the input parameters related to the shortest path query
Args:
        SOURCE (int): stop id of the SOURCE stop
        DESTINATION (int/list): stop id of the DESTINATION stop. For One-To-Many algorithms, this is a list.
D_TIME (pandas.datetime): Departure time
MAX_TRANSFER (int): Max transfer limit
WALKING_FROM_SOURCE (int): 1 or 0. 1 means walking from SOURCE is allowed.
variant (int): variant of the algorithm. 0 for normal version,
1 for range version,
2 for One-To-Many version,
3 for Hyper version
no_of_partitions: number of partitions network has been divided into
weighting_scheme: which weighing scheme has been used to generate partitions.
partitioning_algorithm: which algorithm has been used to generate partitions.
Returns: None
"""
print("___________________Query Parameters__________________")
print("Network: Switzerland")
print(f"SOURCE stop id: {SOURCE}")
print(f"DESTINATION stop id: {DESTINATION}")
print(f"Maximum Transfer allowed: {MAX_TRANSFER}")
print(f"Is walking from SOURCE allowed ?: {WALKING_FROM_SOURCE}")
if variant == 2 or variant == 1:
print(f"Earliest departure time: 24 hour (Profile Query)")
else:
print(f"Earliest departure time: {D_TIME}")
if variant == 4:
print(f"Number of partitions: {no_of_partitions}")
print(f"Partitioning Algorithm used: {partitioning_algorithm}")
print(f"Weighing scheme: {weighting_scheme}")
print("_____________________________________________________")
return None
def read_partitions(stop_times_file, FOLDER, no_of_partitions, weighting_scheme, partitioning_algorithm):
"""
Reads the fill-in information.
Args:
stop_times_file (pandas.dataframe): dataframe with stoptimes details
FOLDER (str): path to network folder.
no_of_partitions (int): number of partitions network has been divided into.
weighting_scheme (str): which weighing scheme has been used to generate partitions.
        partitioning_algorithm (str): which algorithm has been used to generate partitions. Currently supported arguments are hmetis or kahypar.
Returns:
stop_out (dict) : key: stop-id (int), value: stop-cell id (int). Note: if stop-cell id of -1 denotes cut stop.
route_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of route ids belonging to the stop cell combination
cut_trips (set): set of trip ids that are part of fill-in.
trip_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of trip ids belonging to the stop cell combination
"""
import itertools
if partitioning_algorithm == "hmetis":
route_out = pd.read_csv(f'./partitions/{FOLDER}/routeout_{weighting_scheme}_{no_of_partitions}.csv',
usecols=['path_id', 'group']).groupby('group')
stop_out = pd.read_csv(f'./partitions/{FOLDER}/cutstops_{weighting_scheme}_{no_of_partitions}.csv', usecols=['stop_id', 'g_id'])
fill_ins = pd.read_csv(f'./partitions/{FOLDER}/fill_ins_{weighting_scheme}_{no_of_partitions}.csv')
elif partitioning_algorithm == "kahypar":
route_out = pd.read_csv(f'./kpartitions/{FOLDER}/routeout_{weighting_scheme}_{no_of_partitions}.csv', usecols=['path_id', 'group']).groupby('group')
stop_out = pd.read_csv(f'./kpartitions/{FOLDER}/cutstops_{weighting_scheme}_{no_of_partitions}.csv', usecols=['stop_id', 'g_id']).astype(int)
fill_ins = pd.read_csv(f'./kpartitions/{FOLDER}/fill_ins_{weighting_scheme}_{no_of_partitions}.csv')
fill_ins.fillna(-1, inplace=True)
fill_ins['routes'] = fill_ins['routes'].astype(int)
print(f'_________Fill-in information for {len(set(stop_out.g_id)) - 1} Partition_________')
print(
f'Number of cutstops: {(len(stop_out[stop_out.g_id == -1]))} ({round((len(stop_out[stop_out.g_id == -1])) / (len(stop_out)) * 100, 2)}%)')
stop_out = {row.stop_id: row.g_id for _, row in stop_out.iterrows()}
cut_trips = set(fill_ins['trips'])
route_partitions, trip_partitions = {}, {}
for g_id, rotes in route_out:
route_partitions[g_id] = set((rotes['path_id']))
trip_partitions[g_id] = set(stop_times_file[stop_times_file.route_id.isin(route_partitions[g_id])].trip_id)
trip_partitions[-1] = set(fill_ins['trips'])
grups = list(itertools.combinations(trip_partitions.keys(), 2))
trip_groups = {}
for group in grups:
trip_groups[tuple(sorted(group))] = trip_partitions[group[0]].union(trip_partitions[group[1]]).union(
trip_partitions[-1])
for x in trip_partitions.keys():
trip_groups[(x, x)] = trip_partitions[x].union(trip_partitions[-1])
route_partitions[-1] = set(fill_ins['routes'])
route_partitions[-1].remove(-1)
route_groups = {}
for group in grups:
route_groups[tuple(sorted(group))] = route_partitions[group[0]].union(route_partitions[group[1]]).union(
route_partitions[-1])
for x in route_partitions.keys():
route_groups[(x, x)] = route_partitions[x].union(route_partitions[-1])
print(f"fill-in trips: {len(cut_trips)} ({round(len(cut_trips) / len(set(stop_times_file.trip_id)) * 100, 2)}%)")
print(
f'fill-in routes: {len(set(fill_ins.routes)) - 1} ({round((len(set(fill_ins.routes)) - 1) / len(set(stop_times_file.route_id)) * 100, 2)}%)')
print("____________________________________________________")
return stop_out, route_groups, cut_trips, trip_groups
def read_nested_partitions(stop_times_file, FOLDER, no_of_partitions, weighting_scheme):
"""
Read fill-ins in case of nested partitioning.
Args:
stop_times_file (pandas.dataframe): dataframe with stoptimes details
FOLDER (str): path to network folder.
no_of_partitions (int): number of partitions network has been divided into.
weighting_scheme (str): which weighing scheme has been used to generate partitions.
Returns:
stop_out (dict) : key: stop-id (int), value: stop-cell id (int). Note: if stop-cell id of -1 denotes cut stop.
route_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of route ids belonging to the stop cell combination
cut_trips (set): set of trip ids that are part of fill-in.
trip_groups (dict): key: tuple of all possible combinations of stop cell id, value: set of trip ids belonging to the stop cell combination
"""
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
import itertools
main_partitions = no_of_partitions
route_out = pd.read_csv(f'./kpartitions/{FOLDER}/nested/nested_route_out_{weighting_scheme}_{main_partitions}.csv')
stop_out = pd.read_csv(f'./kpartitions/{FOLDER}/nested/nested_cutstops_{weighting_scheme}_{main_partitions}.csv')
fill_ins = pd.read_csv(f'./kpartitions/{FOLDER}//nested/nested_fill_ins_{weighting_scheme}_{main_partitions}.csv')
fill_ins.fillna(-1, inplace=True)
fill_ins['routes'] = fill_ins['routes'].astype(int)
temp = stop_out.drop(columns=['lat', 'long', 'boundary_g_id'])
cut_stops_db = temp[temp.isin([-1]).any(axis=1)]
# print(f'Upper Partition: {len(set(stop_out.g_id)) - 1} (2-way nesting)')
# print(f'{len(cut_stops_db)} ({round((len(cut_stops_db)) / (len(stop_out)) * 100, 2)} Total cutstops%)')
start = 0
normal_stops = stop_out[~stop_out.index.isin(cut_stops_db.index)]
for x in set(normal_stops.g_id):
normal_stops.loc[:, f"lower_cut_stops_{x}"] = normal_stops[f"lower_cut_stops_{x}"] + start
start = start + 2
stop_out = {row.stop_id: row[f"lower_cut_stops_{row.g_id}"] for _, row in normal_stops.iterrows()}
stop_out.update({stopp: -1 for stopp in set(cut_stops_db.stop_id)})
route_partitions, trip_partitions = {}, {}
route_groups = route_out.groupby('group')
for g_id, rotes in route_groups:
route_partitions[g_id] = set((rotes['path_id']))
trip_partitions[g_id] = set(stop_times_file[stop_times_file.route_id.isin(route_partitions[g_id])].trip_id)
trip_partitions[-1] = set(fill_ins['trips'])
grups = list(itertools.combinations(trip_partitions.keys(), 2))
trip_groups = {}
for group in grups:
trip_groups[tuple(sorted(group))] = trip_partitions[group[0]].union(trip_partitions[group[1]]).union(
trip_partitions[-1])
for x in trip_partitions.keys():
trip_groups[(x, x)] = trip_partitions[x].union(trip_partitions[-1])
route_partitions[-1] = set(fill_ins['routes'])
route_partitions[-1].remove(-1)
grups = list(itertools.combinations(route_partitions.keys(), 2))
route_groups = {}
for group in grups:
route_groups[tuple(sorted(group))] = route_partitions[group[0]].union(route_partitions[group[1]]).union(
route_partitions[-1])
for x in route_partitions.keys():
route_groups[(x, x)] = route_partitions[x].union(route_partitions[-1])
cut_trips = set(fill_ins['trips'])
# print(f"{len(cut_trips)} ({round(len(cut_trips) / len(set(stop_times_file.trip_id)) * 100, 2)}%) are cut trips")
# print(f'{len(set(fill_ins.routes)) - 1} ({round((len(set(fill_ins.routes)) - 1) / len(set(stop_times_file.route_id)) * 100, 2)})% are cut routes')
return stop_out, route_groups, cut_trips, trip_groups
def check_nonoverlap(stoptimes_dict, stops_dict):
'''
Check for non overlapping trips in stoptimes_dict. If found, it reduces the timestamp of the earlier trip by 1 second.
This process is repeated untill overlapping trips=null. Note 1 second is taken so as to avoid creation of new overlapping trips
due to timestamp correction.
Args:
stoptimes_dict (dict): preprocessed dict. Format {route_id: [[trip_1], [trip_2]]}.
Returns:
overlap (set): set of routes with overlapping trips.
'''
for x in stops_dict.items():
if len(x[1]) != len(set(x[1])):
            print(f'duplicate stops in a route {x}')
    overlap = set() # Collect routes with overlapping trips
for r_idx, route_trips in stoptimes_dict.items():
for x in range(len(route_trips) - 1):
first_trip = route_trips[x]
second_trip = route_trips[x + 1]
if any([second_trip[idx][1] <= first_trip[idx][1] for idx, stamp in enumerate(first_trip)]):
overlap = overlap.union({r_idx})
if overlap:
print(f"{len(overlap)} have overlapping trips")
while overlap:
        for r_idx in overlap: # Correct routes with overlapping trips
route_trips = stoptimes_dict[r_idx].copy()
for x in range(len(route_trips) - 1):
first_trip = route_trips[x]
second_trip = route_trips[x + 1]
for idx, _ in enumerate(first_trip):
if second_trip[idx][1] <= first_trip[idx][1]:
stoptimes_dict[r_idx][x][idx] = (second_trip[idx][0], second_trip[idx][1] - | pd.to_timedelta(1, unit="seconds") | pandas.to_timedelta |
import re
import warnings
import numpy as np
import pandas as pd
import scipy
from pandas import DataFrame
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.preprocessing import MultiLabelBinarizer, Normalizer
from tqdm import tqdm
class BaseRecommender(object):
def __init__(self, items_path: str, train_path: str, test_path: str, val_path: str) -> None:
"""Base recommender class
Args:
items_path (str): Path to pickle file containing the items
train_path (str): Path to train data parquet file
test_path (str): Path to test data parquet file
val_path (str): Path to validation data parquet file
"""
items = self._preprocess_items(pd.read_pickle(items_path))
self.items, self.metadata = self._generate_item_features(items)
self.train = self._preprocess_train(pd.read_parquet(train_path))
self.test = pd.read_parquet(test_path) if test_path else None
self.val = pd.read_parquet(val_path) if val_path else None
self.recommendations = DataFrame()
def _preprocess_items(self, items: DataFrame) -> DataFrame:
"""Applies preprocessing to the items
Args:
items (DataFrame): Dataframe containing all items with their metadata
Returns:
DataFrame: Sanitised item metadata
"""
### borrowed from data processing script
sentiment_map = {
'Overwhelmingly Negative' : (0.1, 1.0),
'Very Negative' : (0.1, 0.6),
'Negative' : (0.1, 0.1),
'Mostly Negative' : (0.3, 0.5),
'1 user reviews' : (0.5, 0.002),
'2 user reviews' : (0.5, 0.004),
'3 user reviews' : (0.5, 0.006),
'4 user reviews' : (0.5, 0.008),
'5 user reviews' : (0.5, 0.010),
'6 user reviews' : (0.5, 0.012),
'7 user reviews' : (0.5, 0.014),
'8 user reviews' : (0.5, 0.016),
'9 user reviews' : (0.5, 0.018),
'Mixed' : (0.55, 0.5),
'Mostly Positive' : (0.75, 0.5),
'Positive' : (0.9, 0.1),
'Very Positive' : (0.9, 0.6),
'Overwhelmingly Positive' : (1.0, 1.0),
}
# fill nan with '1 user reviews'
sentiment = items['sentiment'].apply(lambda x: x if isinstance(x, str) else '1 user reviews')
# create new columns based on the sentiment
items['sentiment_rating'] = sentiment.apply(lambda x: sentiment_map[x][0])
items['sentiment_n_reviews'] = sentiment.apply(lambda x: sentiment_map[x][1])
### stop borrow
items["price"] = items["price"].apply(lambda p: np.float32(p) if re.match(r"\d+(?:.\d{2})?", str(p)) else 0)
items["metascore"] = items["metascore"].apply(lambda m: m if m != "NA" else np.nan)
items["developer"].fillna(value='', inplace=True)
items["developer"] = items["developer"].apply(lambda my_str: my_str.lower().split(','))
items["publisher"].fillna(value='', inplace=True)
items["publisher"] = items["publisher"].apply(lambda my_str: my_str.lower().split(','))
items["early_access"] = items["early_access"].apply(lambda x: ["earlyaccess"] if x else [])
items["specs"] = items["specs"].fillna("")
items["specs"] = items["specs"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
items["tags"] = items["tags"].fillna("")
items["tags"] = items["tags"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
items["genres"] = items["genres"].fillna("")
items["genres"] = items["genres"].apply(lambda l: [re.subn(r"[^a-z0-9]", "", my_str.lower())[0] for my_str in l])
return items
def _preprocess_train(self, train: DataFrame) -> DataFrame:
"""Applies preprocessing to the training set
Args:
train (DataFrame): Dataframe containing all training data
Returns:
DataFrame: Sanitised training data
"""
train["normalized_playtime_forever_sum"] = train.apply(lambda x: (np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2))/np.sum(np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2)), axis=1)
train["normalized_playtime_forever_max"] = train.apply(lambda x: (np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2))/np.max(np.log(np.array(x["playtime_forever"]) + np.array(x["playtime_2weeks"]) + 2)), axis=1)
return train
def set_user_data(self, train_path: str, test_path: str, val_path: str) -> None:
"""Read new train, test and val data
Args:
train_path (str): Path to train parquet file
test_path (str): Path to test parquet file
val_path (str): Path to validation parquet file
"""
self.train = pd.read_parquet(train_path)
self.test = pd.read_parquet(test_path)
self.val = pd.read_parquet(val_path)
def _generate_item_features(self, items: DataFrame):
"""Generates the item representations
Args:
items (DataFrame): Dataframe containing only relevant metadata
"""
pass
def evaluate(self, k=10, val=False) -> dict:
"""Evaluate the recommendations
Args:
            k (int, optional): Amount of recommendations to consider. Defaults to 10.
            val (bool, optional): Whether to use the validation set instead of the test set. Defaults to False.
Returns:
dict: a dict containing the hitrate@k, recall@k and nDCG@k
"""
gt = self.val if val else self.test
gt.rename(columns={"item_id": "items"}, inplace=True)
eval = self.recommendations
eval = eval.merge(gt, left_index=True, right_index=True)
results_dict = dict()
# Cap to k recommendations
eval['recommendations'] = eval['recommendations'].apply(lambda rec: rec[:k])
# compute HR@k
eval['HR@k'] = eval.apply(lambda row: int(any(item in row['recommendations'] for item in row['items'])), axis=1)
results_dict[f'HR@{k}'] = eval['HR@k'].mean()
# compute nDCG@k
eval['nDCG@k'] = eval.apply(lambda row: np.sum([int(rec in row['items'])/(np.log2(i+2)) for i, rec in enumerate(row['recommendations'])]), axis=1)
eval['nDCG@k'] = eval.apply(lambda row: row['nDCG@k']/np.sum([1/(np.log2(i+2)) for i in range(min(k, len(row['items'])))]), axis=1)
results_dict[f'nDCG@{k}'] = eval['nDCG@k'].mean()
# compute recall@k
eval['items'] = eval['items'].apply(set)
eval['recommendations'] = eval['recommendations'].apply(set)
eval['recall@k'] = eval.apply(lambda row: len(row['recommendations'].intersection(row['items']))/len(row['items']), axis=1)
results_dict[f'recall@{k}'] = eval['recall@k'].mean()
# compute ideal recall@k
eval['ideal_recall@k'] = eval.apply(lambda row: min(k, len(row['items']))/len(row['items']), axis=1)
results_dict[f'ideal_recall@{k}'] = eval['ideal_recall@k'].mean()
# compute normalised recall@k
eval['nRecall@k'] = eval.apply(lambda row: row['recall@k']/row['ideal_recall@k'], axis=1)
results_dict[f'nRecall@{k}'] = eval['nRecall@k'].mean()
return results_dict
def qualitative_evaluation(self, users:list=[], export_path:str=None) -> DataFrame:
eval_data = self.recommendations if len(users) == 0 else self.recommendations.iloc[users]
new_data = DataFrame({"owned_items": eval_data["item_id"].apply(lambda row: [self.metadata.at[id, "app_name"] for id in row]),
"recommended_items": eval_data["recommendations"].apply(lambda row: [self.metadata.at[id, "app_name"] for id in row])}, index=eval_data.index)
if export_path:
new_data.to_csv(export_path)
return new_data
class ContentBasedRecommender(BaseRecommender):
def __init__(self, items_path: str, train_path: str, test_path: str, val_path: str, sparse: bool = True, tfidf='default', normalize=False, columns:list=["genres", "tags"]) -> None:
"""Content based recommender
Args:
items_path (str): Path to pickle file containing the items
train_path (str): Path to train data parquet file
test_path (str): Path to test data parquet file
val_path (str): Path to validation data parquet file
sparse (bool, optional): Whether a sparse feature representation should be used. Defaults to True.
tfidf (str, optional): Which tf-idf variant to use ('default', 'smooth', 'sublinear' or 'smooth_sublinear'). Defaults to 'default'.
normalize (bool, optional): Whether to normalize the feature vectors. Defaults to False.
columns (list, optional): Columns to use for feature representation. Defaults to ["genres", "tags"].
"""
self.sparse = sparse
self.normalize = normalize
self.recommendations = None
self.normalizer = Normalizer(copy=False)
self.columns = columns
# Select tf-idf method to use
self.tfidf = None
if tfidf == 'default':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=False)
elif tfidf == 'smooth':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=False)
elif tfidf == 'sublinear':
self.tfidf = TfidfTransformer(smooth_idf=False, sublinear_tf=True)
elif tfidf == 'smooth_sublinear':
self.tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=True)
# Select algorithm to use for neighbour computation
algorithm = 'auto'
self.method = NearestNeighbors(n_neighbors=10, algorithm=algorithm, metric='cosine')
super(ContentBasedRecommender, self).__init__(items_path, train_path, test_path, val_path)
def _process_item_features(self, items: DataFrame) -> DataFrame:
"""Processes the item metadata for feature generation
Args:
items (DataFrame): Dataframe containing items metadata
Returns:
Tuple[DataFrame, DataFrame]: the feature columns and the remaining metadata columns
"""
return items.filter(self.columns), items.filter([col for col in items.columns if col not in self.columns+["index"]])
def _generate_item_features(self, items: DataFrame) -> DataFrame:
"""Generates feature vector of items and appends to returned DataFrame
Args:
items (DataFrame): dataframe containing the items
Returns:
Tuple[DataFrame, DataFrame]: dataframe with the one-hot tag features, and the accompanying metadata
"""
items, metadata = self._process_item_features(items)
# Combine all features into one column
columns = items.columns.tolist()
for col in columns:
items[col] = items[col].fillna("").apply(set)
items["tags"] = items.apply(lambda x: list(
set.union(*([x[col] for col in columns]))), axis=1)
if "tags" in columns:
columns.remove("tags")
items = items.drop(columns, axis=1)
# Compute one-hot encoded vector of tags
mlb = MultiLabelBinarizer(sparse_output=self.sparse)
if self.sparse:
items = items.join(DataFrame.sparse.from_spmatrix(mlb.fit_transform(items.pop(
"tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
else:
items = items.join(DataFrame(mlb.fit_transform(items.pop(
"tags")), index=items.index, columns=["tag_" + c for c in mlb.classes_]))
return items, metadata
def generate_recommendations(self, amount=10, read_max=None) -> None:
"""Generate recommendations based on user review data
Args:
amount (int, optional): Number of recommendations to generate per user. Defaults to 10.
read_max (int, optional): Maximum number of users to read; None means all. Defaults to None.
"""
items = self.items
df = self.train.iloc[:read_max].copy(deep=True) if read_max else self.train
# Convert the item feature representation to a matrix (sparse or dense)
if self.sparse:
X = scipy.sparse.csr_matrix(items.values)
else:
X = np.array(items.values)
if self.tfidf:
# Use tf-idf
X = self.tfidf.fit_transform(X)
if self.normalize:
X = self.normalizer.fit_transform(X)
# Wrap the transformed feature matrix back into a DataFrame
if self.sparse:
items = DataFrame.sparse.from_spmatrix(X)
else:
items = | DataFrame(X) | pandas.DataFrame |
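As a quick illustration of the pandas.DataFrame completion above, a minimal sketch of wrapping a dense or a sparse feature matrix in a DataFrame; the variable names are assumptions:

import numpy as np
import scipy.sparse
from pandas import DataFrame

X_dense = np.array([[0.0, 1.0], [0.5, 0.5]])           # dense feature matrix
X_sparse = scipy.sparse.csr_matrix(X_dense)            # same data in CSR format

df_dense = DataFrame(X_dense)                          # plain DataFrame
df_sparse = DataFrame.sparse.from_spmatrix(X_sparse)   # sparse-backed DataFrame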
import os
pat = "/storage/research/Intern19_v2/AutomatedDetectionWSI/LiverImages/"
#pat_1 = "/storage/research/Intern19_v2/AutomatedDetectionWSI/level_1/"
#pat_2 = "/storage/research/Intern19_v2/AutomatedDetectionWSI/level_2/"
a= os.walk(pat)
a = list(a)
l = []
for i in a[0][2]:
if '.xml' in i or 'svs' in i or 'SVS' in i:
continue
else:
l.append(i)
print(len(l))
#from pyslide import pyramid
from skimage import io
whole = {}
viable = {}
for i in l:
p = os.path.join(pat,i)
print(p)
l_1 = io.imread(p)
#print("l_1 loaded")
d = i[:-4] # 01_01_0083_l_0
print(d, l_1.shape)
if 'whole' in d:
whole[d] = l_1.shape
print("whole")
else:
viable[d] = l_1.shape
print("viable")
import pandas as pd
df = | pd.DataFrame(whole) | pandas.DataFrame |
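For illustration, a minimal sketch of pandas.DataFrame built from a dict of image shapes like the one collected above; the file names and shapes are made up:

import pandas as pd

whole_example = {
    "01_01_0083_whole": (4000, 3000, 3),
    "01_01_0084_whole": (2048, 2048, 3),
}
df_example = pd.DataFrame(whole_example)   # keys become columns, shape components become rows
print(df_example)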
"""
Created on Wed Nov 18 14:20:22 2020
@author: MAGESHWARI
"""
import os
from tkinter import *
from tkinter import messagebox as mb
from tkinter import filedialog
import re
import csv
import pandas as pd
def center_window(w=200, h=500):
# get screen width and height
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
def browse1():
global df1
# global directory
# global filename
# global contents
filepath = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
select_file_field.insert(0,filepath) # insert the path in textbox
df1 = pd.read_csv(filepath)
# file = open(filepath,'r') # open the selected file
# contents = file.read()
# print(contents)
def browse2():
global df2
global basefilepath
basefilepath = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV files","*.csv"),("all files","*.*")))
base_file_field.insert(0,basefilepath) # insert the path in textbox
df2 = | pd.read_csv(basefilepath) | pandas.read_csv |
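A small, hedged sketch of pandas.read_csv as used in the two browse callbacks above; the path is a placeholder:

import pandas as pd

# read a CSV file into a DataFrame; "example.csv" is only a placeholder path
df = pd.read_csv("example.csv")
print(df.head())    # first rows
print(df.dtypes)    # inferred column types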
from typing import Union
import numpy as np
import pandas as pd
import modin.pandas as mpd
from datetime import datetime, timedelta
import calendar
def convert_date(date: Union[datetime, str, pd.Series, np.ndarray]) -> np.ndarray:
"""Receives `date` from a variety of datatypes and converts it into a numeric value in a numpy array.
If `date` is a vector then it returns a vector otherwise it returns a single scalar value.
Args:
date (Union[datetime, str, pd.Series, np.ndarray]): The date to be converted
Returns:
np.ndarray: A NumPy array with datatype datetime64[D].
"""
if isinstance(date, int):
date = pd.to_datetime(str(date))
elif isinstance(date, float):
year = int(date)
days_in_year = 366 if calendar.isleap(year) else 365
date = datetime(year, 1, 1) + timedelta(days=(date % 1) * days_in_year)
elif isinstance(date, np.ndarray):
if np.issubdtype(date.dtype, np.integer):
date = date.astype(str)
date = | pd.to_datetime(date) | pandas.to_datetime |
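An illustrative sketch of the pandas.to_datetime branch above for an integer-valued array; the sample values are assumptions:

import numpy as np
import pandas as pd

dates_int = np.array([20200101, 20200215])
dates = pd.to_datetime(dates_int.astype(str))    # DatetimeIndex(['2020-01-01', '2020-02-15'])
as_days = dates.values.astype("datetime64[D]")   # NumPy array with day precision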
import json
import os
from typing import Union
import numpy as np
import pandas as pd
from mlflow.exceptions import MlflowException
from mlflow.types.utils import TensorsNotSupportedException
from mlflow.utils.proto_json_utils import NumpyEncoder
ModelInputExample = Union[pd.DataFrame, np.ndarray, dict, list]
class _Example(object):
"""
Represents an input example for MLflow model.
Contains jsonable data that can be saved with the model and meta data about the exported format
that can be saved with :py:class:`Model <mlflow.models.Model>`.
The _Example is created from example data provided by user. The example(s) can be provided as
pandas.DataFrame, numpy.ndarray, python dictionary or python list. The assumption is that the
example is a DataFrame-like dataset with jsonable elements (see storage format section below).
NOTE: Multidimensional (>2d) arrays (aka tensors) are not supported at this time.
NOTE: If the example is 1 dimensional (e.g. dictionary of str -> scalar, or a list of scalars),
the assumption is that it is a single row of data (rather than a single column).
Metadata:
The _Example metadata contains the following information:
- artifact_path: Relative path to the serialized example within the model directory.
- type: Type of example data provided by the user. E.g. dataframe.
- pandas_orient: For dataframes, this attribute specifies how is the dataframe encoded in
json. For example, "split" value signals that the data is stored as object
with columns and data attributes.
Storage Format:
The examples are stored as json for portability and readability. Therefore, the contents of the
example(s) must be jsonable. Mlflow will make the following conversions automatically on behalf
of the user:
- binary values: :py:class:`bytes` or :py:class:`bytearray` are converted to base64
encoded strings.
- numpy types: Numpy types are converted to the corresponding python types or their closest
equivalent.
"""
def __init__(self, input_example: ModelInputExample):
def _is_scalar(x):
return np.isscalar(x) or x is None
if isinstance(input_example, dict):
for x, y in input_example.items():
if isinstance(y, np.ndarray) and len(y.shape) > 1:
raise TensorsNotSupportedException(
"Column '{0}' has shape {1}".format(x, y.shape))
if all([_is_scalar(x) for x in input_example.values()]):
input_example = pd.DataFrame([input_example])
else:
input_example = pd.DataFrame.from_dict(input_example)
elif isinstance(input_example, list):
for i, x in enumerate(input_example):
if isinstance(x, np.ndarray) and len(x.shape) > 1:
raise TensorsNotSupportedException("Row '{0}' has shape {1}".format(i, x.shape))
if all([_is_scalar(x) for x in input_example]):
input_example = pd.DataFrame([input_example])
else:
input_example = pd.DataFrame(input_example)
elif isinstance(input_example, np.ndarray):
if len(input_example.shape) > 2:
raise TensorsNotSupportedException("Input array has shape {}".format(
input_example.shape))
input_example = | pd.DataFrame(input_example) | pandas.DataFrame |
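A brief sketch (with assumed example data) of the conversions performed in _Example.__init__, where a dict of scalars and a 2-D array both end up as a pandas DataFrame:

import numpy as np
import pandas as pd

scalar_dict = {"a": 1, "b": 2.5}
df_from_dict = pd.DataFrame([scalar_dict])   # single-row DataFrame

array_2d = np.array([[1, 2], [3, 4]])
df_from_array = pd.DataFrame(array_2d)       # 2x2 DataFrame with default integer column names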
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Testes de Unions
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
move_df = MoveDataFrame(list_move)
list_events = [
[39.984094, 116.319236, 1, Timestamp('2008-10-24 01:57:57'),
'show do tropykalia'],
[39.991013, 116.326384, 2, Timestamp('2008-10-24 00:22:01'),
'evento da prefeitura'],
[40.01, 116.312615, 3, Timestamp('2008-10-25 00:21:01'),
'show do seu joao'],
[40.013821, 116.306531, 4, Timestamp('2008-10-26 00:22:01'),
'missa']
]
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-23T03:53:05.000000000', '2008-10-23T08:37:26.000000000',
'2008-10-23T08:50:16.000000000', '2008-10-23T09:03:06.000000000',
'2008-10-23T09:58:33.000000000', '2008-10-23T21:50:45.000000000',
'2008-10-23T22:02:14.000000000', '2008-10-23T22:22:01.000000000',
'2008-10-23T23:57:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T07:53:05.000000000', '2008-10-23T12:37:26.000000000',
'2008-10-23T12:50:16.000000000', '2008-10-23T13:03:06.000000000',
'2008-10-23T13:58:33.000000000', '2008-10-24T01:50:45.000000000',
'2008-10-24T02:02:14.000000000', '2008-10-24T02:22:01.000000000',
'2008-10-24T03:57:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(9, None, dtype=np.ndarray)
type_expected = np.full(9, None, dtype=np.ndarray)
id_expected = np.full(9, None, dtype=np.ndarray)
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window_and_creates_event_id_type_all(
move_df, pois, 7200, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_join_with_pois():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 'distrito_pol_1'],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 6, 128.24869775642176, 'adocao_de_animais'],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 5, 663.0104596559174, 'rinha_de_galo_world_cup'],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 4, 286.3387434682031, 'forro_tropykalia'],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 4,
0.9311014399622559, 'forro_tropykalia'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 3,
211.06912863495492, 'supermercado_aroldo'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'policia_federal'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 6,
792.7526066105717, 'adocao_de_animais'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 7,
270.7018856738821, 'dia_do_municipio']
],
columns=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, ID_POI, DIST_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_pois_by_category():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 3, 2935.3102772960456, 7, 814.8193850933852, 5,
2672.393533820207, 6, 675.1730686007362],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 1, 637.6902157810676, 3, 3072.6963790707114, 7,
1385.3649632111096, 5, 2727.1360691122813, 6, 128.24869775642176],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 2, 1385.0871812075436, 3, 1094.8606633486436, 4,
1762.0085654338782, 5, 663.0104596559174, 6, 1965.702358742657],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 2, 3225.288830967221, 3, 810.5429984051405, 4,
286.3387434682031, 5, 1243.8915481769327, 6, 3768.0652637796675],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 3, 669.9731550451877, 4, 0.9311014399622559,
5, 1145.172578151837, 6, 3574.252994707609],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 3, 211.06912863495492, 4, 857.4175399672413,
5, 289.35378153627966, 6, 2855.1657930463994],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 3, 2179.5701631051966, 7,
2003.4096341742952, 5, 1784.3132149978549, 6, 870.5252810680124],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 3, 3702.2394204188754, 7, 1287.7039084016499,
5, 3376.4438614084356, 6, 792.7526066105717],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 3, 3154.296880053552, 7, 270.7018856738821, 5,
2997.898227057909, 6, 1443.9247752786023]
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, 'id_policia', 'dist_policia',
'id_comercio', 'dist_comercio', 'id_show', 'dist_show', 'id_risca-faca',
'dist_risca-faca', 'id_evento', 'dist_evento'
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois_by_category(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_events():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1,
'', inf, ''],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, '', inf, ''],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, '', inf, ''],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, '', inf, ''],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 'evento da prefeitura'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 'evento da prefeitura'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'evento da prefeitura'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 'show do tropykalia'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 'show do tropykalia']
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, EVENT_ID, DIST_EVENT, EVENT_TYPE
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_events(move_df, pois, time_window=45000, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_event_by_dist_and_time():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
move_df = MoveDataFrame(list_move)
list_events = [
[39.984094, 116.319236, 1, Timestamp('2008-10-24 01:57:57'),
'show do tropykalia'],
[39.991013, 116.326384, 2, Timestamp('2008-10-24 00:22:01'),
'evento da prefeitura'],
[40.01, 116.312615, 3, Timestamp('2008-10-25 00:21:01'),
'show do seu joao'],
[40.013821, 116.306531, 4, Timestamp('2008-10-26 00:22:01'),
'missa']
]
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'),
1, None, None, None],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, None, None, None],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, None, None, None],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, None, None, None],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'),
2, None, None, None],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'),
2, [2], [2294.0758201547073], ['evento da prefeitura']],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, [1, 2], [1217.1198213850694, 279.6712398549538],
['show do tropykalia', 'evento da prefeitura']],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'),
3, [1, 2], [900.7798955139455, 1383.9587958381394],
['show do tropykalia', 'evento da prefeitura']],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'),
3, [1, 2], [770.188754517813, 1688.0786831571447],
['show do tropykalia', 'evento da prefeitura']]
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID,
EVENT_ID, DIST_EVENT, EVENT_TYPE
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_event_by_dist_and_time(
move_df, pois, radius=3000, time_window=7200, inplace=True
)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_home_by_id():
list_home = [
[39.984094, 116.319236, 1, 'rua da mae', 'quixiling'],
[40.013821, 116.306531, 2, 'rua da familia', 'quixeramoling']
]
move_df = MoveDataFrame(list_move)
home_df = DataFrame(
data=list_home,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, ADDRESS, CITY]
)
expected = DataFrame(
data=[
[1, 39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 0.0,
'rua da mae', 'quixiling'],
[1, 39.984559000000004, 116.326696,
Timestamp('2008-10-23 10:37:26'), 637.6902157810676,
'rua da mae', 'quixiling'],
[1, 40.002899, 116.32151999999999,
Timestamp('2008-10-23 10:50:16'), 2100.0535005951438,
'rua da mae', 'quixiling'],
[1, 40.016238, 116.30769099999999,
Timestamp('2008-10-23 11:03:06'), 3707.066732003998,
'rua da mae', 'quixiling'],
[2, 40.013814, 116.306525, | Timestamp('2008-10-23 11:58:33') | pandas.Timestamp |
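For reference, a minimal sketch of pandas.Timestamp as used in the fixtures above; the arithmetic shown is only illustrative:

import pandas as pd

ts = pd.Timestamp('2008-10-23 11:58:33')
print(ts.year, ts.month, ts.day)     # 2008 10 23
print(ts + pd.Timedelta(hours=2))    # 2008-10-23 13:58:33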
import cProfile
import os
import pstats
import sys
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from .base_backend import ComputationalBackend
from .feature_tree import FeatureTree
from featuretools import variable_types
from featuretools.exceptions import UnknownFeature
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature,
TransformFeature
)
from featuretools.utils.gen_utils import (
get_relationship_variable_id,
make_tqdm_iterator
)
warnings.simplefilter('ignore', np.RankWarning)
warnings.simplefilter("ignore", category=RuntimeWarning)
class PandasBackend(ComputationalBackend):
def __init__(self, entityset, features):
assert len(set(f.entity.id for f in features)) == 1, \
"Features must all be defined on the same entity"
self.entityset = entityset
self.target_eid = features[0].entity.id
self.features = features
self.feature_tree = FeatureTree(entityset, features)
def __sizeof__(self):
return self.entityset.__sizeof__()
def calculate_all_features(self, instance_ids, time_last,
training_window=None, profile=False,
precalculated_features=None, ignored=None,
verbose=False):
"""
Given a list of instance ids and features with a shared time window,
generate and return a mapping of instance -> feature values.
Args:
instance_ids (list): List of instance id for which to build features.
time_last (pd.Timestamp): Last allowed time. Data from exactly this
time not allowed.
training_window (Timedelta, optional): Data older than
time_last by more than this will be ignored.
profile (bool): Enable profiler if True.
verbose (bool): Print output progress if True.
Returns:
pd.DataFrame : Pandas DataFrame of calculated feature values.
Indexed by instance_ids. Columns in same order as features
passed in.
"""
assert len(instance_ids) > 0, "0 instance ids provided"
self.instance_ids = instance_ids
self.time_last = time_last
if self.time_last is None:
self.time_last = datetime.now()
# For debugging
if profile:
pr = cProfile.Profile()
pr.enable()
if precalculated_features is None:
precalculated_features = {}
# Access the index to get the filtered data we need
target_entity = self.entityset[self.target_eid]
if ignored:
# TODO: Just want to remove entities if don't have any (sub)features defined
# on them anymore, rather than recreating
ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities
else:
ordered_entities = self.feature_tree.ordered_entities
necessary_columns = self.feature_tree.necessary_columns
eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=instance_ids,
entity_columns=necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
large_eframes_by_filter = None
if any([f.primitive.uses_full_entity for f in self.feature_tree.all_features if isinstance(f, TransformFeature)]):
large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features
large_eframes_by_filter = \
self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,
index_eid=self.target_eid,
instances=None,
entity_columns=large_necessary_columns,
time_last=time_last,
training_window=training_window,
verbose=verbose)
# Handle an empty time slice by returning a dataframe with defaults
if eframes_by_filter is None:
return self.generate_default_df(instance_ids=instance_ids)
finished_entity_ids = []
# Populate entity_frames with precalculated features
if len(precalculated_features) > 0:
for entity_id, precalc_feature_values in precalculated_features.items():
if entity_id in eframes_by_filter:
frame = eframes_by_filter[entity_id][entity_id]
eframes_by_filter[entity_id][entity_id] = pd.merge(frame,
precalc_feature_values,
left_index=True,
right_index=True)
else:
# Only features we're taking from this entity
# are precomputed
# Make sure the id variable is a column as well as an index
entity_id_var = self.entityset[entity_id].index
precalc_feature_values[entity_id_var] = precalc_feature_values.index.values
eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}
finished_entity_ids.append(entity_id)
# Iterate over the top-level entities (filter entities) in sorted order
# and calculate all relevant features under each one.
if verbose:
total_groups_to_compute = sum(len(group)
for group in self.feature_tree.ordered_feature_groups.values())
pbar = make_tqdm_iterator(total=total_groups_to_compute,
desc="Computing features",
unit="feature group")
if verbose:
pbar.update(0)
for filter_eid in ordered_entities:
entity_frames = eframes_by_filter[filter_eid]
large_entity_frames = None
if large_eframes_by_filter is not None:
large_entity_frames = large_eframes_by_filter[filter_eid]
# update the current set of entity frames with the computed features
# from previously finished entities
for eid in finished_entity_ids:
# only include this frame if it's not from a descendent entity:
# descendent entity frames will have to be re-calculated.
# TODO: this check might not be necessary, depending on our
# constraints
if not self.entityset.find_backward_path(start_entity_id=filter_eid,
goal_entity_id=eid):
entity_frames[eid] = eframes_by_filter[eid][eid]
# TODO: look this over again
# precalculated features will only be placed in entity_frames,
# and it's possible that that they are the only features computed
# for an entity. In this case, the entity won't be present in
# large_eframes_by_filter. The relevant lines that this case passes
# through are 136-143
if (large_eframes_by_filter is not None and
eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):
large_entity_frames[eid] = large_eframes_by_filter[eid][eid]
if filter_eid in self.feature_tree.ordered_feature_groups:
for group in self.feature_tree.ordered_feature_groups[filter_eid]:
if verbose:
pbar.set_postfix({'running': 0})
test_feature = group[0]
entity_id = test_feature.entity.id
input_frames_type = self.feature_tree.input_frames_type(test_feature)
input_frames = large_entity_frames
if input_frames_type == "subset_entity_frames":
input_frames = entity_frames
handler = self._feature_type_handler(test_feature)
result_frame = handler(group, input_frames)
output_frames_type = self.feature_tree.output_frames_type(test_feature)
if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:
index = entity_frames[entity_id].index
# If result_frame came from a uses_full_entity feature,
# and the input was large_entity_frames,
# then it's possible it doesn't contain some of the features
# in the output entity_frames
# We thus need to concatenate the existing frame with the result frame,
# making sure not to duplicate any columns
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in entity_frames[entity_id].columns]
entity_frames[entity_id] = pd.concat([entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:
index = large_entity_frames[entity_id].index
_result_frame = result_frame.reindex(index)
cols_to_keep = [c for c in _result_frame.columns
if c not in large_entity_frames[entity_id].columns]
large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],
_result_frame[cols_to_keep]],
axis=1)
if verbose:
pbar.update(1)
finished_entity_ids.append(filter_eid)
if verbose:
pbar.set_postfix({'running': 0})
pbar.refresh()
sys.stdout.flush()
pbar.close()
# debugging
if profile:
pr.disable()
ROOT_DIR = os.path.expanduser("~")
prof_folder_path = os.path.join(ROOT_DIR, 'prof')
if not os.path.exists(prof_folder_path):
os.mkdir(prof_folder_path)
with open(os.path.join(prof_folder_path, 'inst-%s.log' %
list(instance_ids)[0]), 'w') as f:
pstats.Stats(pr, stream=f).strip_dirs().sort_stats("cumulative", "tottime").print_stats()
df = eframes_by_filter[self.target_eid][self.target_eid]
# fill in empty rows with default values
missing_ids = [i for i in instance_ids if i not in
df[target_entity.index]]
if missing_ids:
default_df = self.generate_default_df(instance_ids=missing_ids,
extra_columns=df.columns)
df = df.append(default_df, sort=True)
df.index.name = self.entityset[self.target_eid].index
column_list = []
for feat in self.features:
column_list.extend(feat.get_feature_names())
return df[column_list]
def generate_default_df(self, instance_ids, extra_columns=None):
index_name = self.features[0].entity.index
default_row = []
default_cols = []
for f in self.features:
for name in f.get_feature_names():
default_cols.append(name)
default_row.append(f.default_value)
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(default_matrix,
columns=default_cols,
index=instance_ids)
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if isinstance(f, TransformFeature):
return self._calculate_transform_features
elif isinstance(f, DirectFeature):
return self._calculate_direct_features
elif isinstance(f, AggregationFeature):
return self._calculate_agg_features
elif isinstance(f, IdentityFeature):
return self._calculate_identity_features
else:
raise UnknownFeature(u"{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, entity_frames):
entity_id = features[0].entity.id
return entity_frames[entity_id][[f.get_name() for f in features]]
def _calculate_transform_features(self, features, entity_frames):
entity_id = features[0].entity.id
assert len(set([f.entity.id for f in features])) == 1, \
"features must share base entity"
assert entity_id in entity_frames
frame = entity_frames[entity_id]
for f in features:
# handle when no data
if frame.shape[0] == 0:
set_default_column(frame, f)
continue
# collect only the variables we need for this transformation
variable_data = [frame[bf.get_name()]
for bf in f.base_features]
feature_func = f.get_function()
# apply the function to the relevant dataframe slice and add the
# feature row to the results dataframe.
if f.primitive.uses_calc_time:
values = feature_func(*variable_data, time=self.time_last)
else:
values = feature_func(*variable_data)
# if we don't get just the values, the assignment breaks when indexes don't match
def strip_values_if_series(values):
if isinstance(values, pd.Series):
values = values.values
return values
if f.number_output_features > 1:
values = [strip_values_if_series(value) for value in values]
else:
values = [strip_values_if_series(values)]
update_feature_columns(f, frame, values)
return frame
def _calculate_direct_features(self, features, entity_frames):
entity_id = features[0].entity.id
parent_entity_id = features[0].parent_entity.id
assert entity_id in entity_frames and parent_entity_id in entity_frames
path = self.entityset.find_forward_path(entity_id, parent_entity_id)
assert len(path) == 1, \
"Error calculating DirectFeatures, len(path) > 1"
parent_df = entity_frames[parent_entity_id]
child_df = entity_frames[entity_id]
merge_var = path[0].child_variable.id
# generate a mapping of old column names (in the parent entity) to
# new column names (in the child entity) for the merge
col_map = {path[0].parent_variable.id: merge_var}
index_as_feature = None
for f in features:
if f.base_features[0].get_name() == path[0].parent_variable.id:
index_as_feature = f
# Sometimes entityset._add_multigenerational_links adds link variables
# that would ordinarily get calculated as direct features,
# so we make sure not to attempt to calculate again
base_names = f.base_features[0].get_feature_names()
for name, base_name in zip(f.get_feature_names(), base_names):
if name in child_df.columns:
continue
col_map[base_name] = name
# merge the identity feature from the parent entity into the child
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
if index_as_feature is not None:
merge_df.set_index(index_as_feature.get_name(), inplace=True,
drop=False)
else:
merge_df.set_index(merge_var, inplace=True)
new_df = pd.merge(left=child_df, right=merge_df,
left_on=merge_var, right_index=True,
how='left')
return new_df
def _calculate_agg_features(self, features, entity_frames):
test_feature = features[0]
entity = test_feature.entity
child_entity = test_feature.base_features[0].entity
assert entity.id in entity_frames and child_entity.id in entity_frames
frame = entity_frames[entity.id]
base_frame = entity_frames[child_entity.id]
# Sometimes approximate features get computed in a previous filter frame
# and put in the current one dynamically,
# so there may be existing features here
features = [f for f in features if f.get_name()
not in frame.columns]
if not len(features):
return frame
# handle where
where = test_feature.where
if where is not None and not base_frame.empty:
base_frame = base_frame.loc[base_frame[where.get_name()]]
# when no child data, just add all the features to frame with nan
if base_frame.empty:
for f in features:
frame[f.get_name()] = np.nan
else:
relationship_path = self.entityset.find_backward_path(entity.id,
child_entity.id)
groupby_var = get_relationship_variable_id(relationship_path)
# if the use_previous property exists on this feature, include only the
# instances from the child entity included in that Timedelta
use_previous = test_feature.use_previous
if use_previous and not base_frame.empty:
# Filter by use_previous values
time_last = self.time_last
if use_previous.is_absolute():
time_first = time_last - use_previous
ti = child_entity.time_index
if ti is not None:
base_frame = base_frame[base_frame[ti] >= time_first]
else:
n = use_previous.value
def last_n(df):
return df.iloc[-n:]
base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)
to_agg = {}
agg_rename = {}
to_apply = set()
# apply multivariable and time-dependent features as we find them, and
# save aggregable features for later
for f in features:
if _can_agg(f):
variable_id = f.base_features[0].get_name()
if variable_id not in to_agg:
to_agg[variable_id] = []
func = f.get_function()
# funcname used in case f.get_function() returns a string
# since strings don't have __name__
funcname = func
if callable(func):
# if the same function is being applied to the same
# variable twice, wrap it in a partial to avoid
# duplicate functions
if u"{}-{}".format(variable_id, id(func)) in agg_rename:
func = partial(func)
func.__name__ = str(id(func))
funcname = str(id(func))
to_agg[variable_id].append(func)
# this is used below to rename columns that pandas names for us
agg_rename[u"{}-{}".format(variable_id, funcname)] = f.get_name()
continue
to_apply.add(f)
# Apply the non-aggregable functions generate a new dataframe, and merge
# it with the existing one
if len(to_apply):
wrap = agg_wrapper(to_apply, self.time_last)
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)
frame = pd.merge(left=frame, right=to_merge,
left_index=True,
right_index=True, how='left')
# Apply the aggregate functions to generate a new dataframe, and merge
# it with the existing one
if len(to_agg):
# groupby_var can be both the name of the index and a column,
# to silence pandas warning about ambiguity we explicitly pass
# the column (in actuality grouping by both index and group would
# work)
to_merge = base_frame.groupby(base_frame[groupby_var],
observed=True, sort=False).agg(to_agg)
# rename columns to the correct feature names
to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns.ravel()]
to_merge = to_merge[list(agg_rename.values())]
# workaround for pandas bug where categories are in the wrong order
# see: https://github.com/pandas-dev/pandas/issues/22501
if pdtypes.is_categorical_dtype(frame.index):
categories = pdtypes.CategoricalDtype(categories=frame.index.categories)
to_merge.index = to_merge.index.astype(object).astype(categories)
frame = pd.merge(left=frame, right=to_merge,
left_index=True, right_index=True, how='left')
# Handle default values
fillna_dict = {}
for f in features:
feature_defaults = {name: f.default_value
for name in f.get_feature_names()}
fillna_dict.update(feature_defaults)
frame.fillna(fillna_dict, inplace=True)
# convert boolean dtypes to floats as appropriate
# pandas behavior: https://github.com/pydata/pandas/issues/3752
for f in features:
if (f.number_output_features == 1 and
f.variable_type == variable_types.Numeric and
frame[f.get_name()].dtype.name in ['object', 'bool']):
frame[f.get_name()] = frame[f.get_name()].astype(float)
return frame
def _can_agg(feature):
assert isinstance(feature, AggregationFeature)
base_features = feature.base_features
if feature.where is not None:
base_features = [bf.get_name() for bf in base_features
if bf.get_name() != feature.where.get_name()]
if feature.primitive.uses_calc_time:
return False
single_output = feature.primitive.number_output_features == 1
return len(base_features) == 1 and single_output
def agg_wrapper(feats, time_last):
def wrap(df):
d = {}
for f in feats:
func = f.get_function()
variable_ids = [bf.get_name() for bf in f.base_features]
args = [df[v] for v in variable_ids]
if f.primitive.uses_calc_time:
values = func(*args, time=time_last)
else:
values = func(*args)
if f.number_output_features == 1:
values = [values]
update_feature_columns(f, d, values)
return | pd.Series(d) | pandas.Series |
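A hedged sketch of the pattern agg_wrapper relies on: groupby(...).apply with a function that returns a pd.Series yields one output column per Series key; the column and group names below are illustrative:

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b"], "x": [1, 2, 3]})

def summarize(group):
    return pd.Series({"SUM(x)": group["x"].sum(), "MAX(x)": group["x"].max()})

result = df.groupby("g").apply(summarize)   # columns: SUM(x), MAX(x); index: g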
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
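# Illustrative usage sketch (TCsOPC, the column name and the window sizes are assumptions):
# mDf = getTCsOPCDerivative(TCsOPC, col='Objects.3S_XYZ.PH', shiftSize=1, windowSize=60)
# mDf[['dValueDt', 'dValueDtRollingMean']].plot()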
logFilenamePattern='([0-9]+)(_)+([0-9]+)(\.log)' # group(3) is the postfix and number
logFilenameHeadPattern='([0-9,_]+)(\.log)' # group(1) is the head and H5 key
# not all IDs are captured by the regex pID
# those are post-processed with pID2, getDfFromODIHelper and in getDfFromODI
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
    returns a df of ID components (Prae, A, B, C, C1..C7, D, E, Post, yUnit, yDesc) parsed from the IDs in ODIFile; index: ID
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
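# Usage sketch for getDfFromODI (hedged: the file path is an assumption; any ODI export with an 'ID'
# column and ';' as delimiter should work):
#
#   dfID = getDfFromODI(r'..\Data\MyProject.ODI.csv')   # hypothetical path
#   dfID.loc['Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S', ['B', 'C', 'E', 'yUnit', 'yDesc']]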
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
        dfIDext=(dfID.merge(dfGrped,left_index=True,right_index=True,how='left')
                     .filter(items=dfID.columns.to_list()+['last','count'])
                     .rename(columns={'last':'Initvalue','count':'NumOfInits'}))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
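# Usage sketch for the fODI* helpers (hedged: the ODI file path and the NAME1 value are assumptions):
#
#   dfODI   = pd.read_csv(r'..\Data\MyProject.ODI.csv', delimiter=';')   # hypothetical path
#   ventIDs = fODIFindAllSchieberSteuerungsIDs(dfODI, NAME1='XYZ')       # all OL_2/VENT control IDs
#   dfVent  = fODIFindAllZeilenWithIDs(dfODI, ventIDs)                   # all ODI rows referencing them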
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
elif row['E'] in ['AC_AV','LR_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
elif row['E'] in ['AC_AV','LR_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
    returns a df with the sorted unique values of each column of dfID (one column per dfID column)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
        # determine the column with the largest number of distinct values
        lenMax=0
        colMax=''
        # over all columns
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
        # over all remaining columns
for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
            # build the unique series s
s=pd.Series(dfID[col].unique(),name=col)
            # sort s
s.sort_values(inplace=True)
s=pd.Series(s.values,name=col)
dfIDUniqueCols=pd.concat([dfIDUniqueCols,s],axis=1)
dfIDUniqueCols=dfIDUniqueCols[dfID.columns]
except:
logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDUniqueCols
def getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',dfID=None,matchCols=['B','C1','C2','C3','C4','C5','D'],any=False):
"""
    returns the IDs from dfID that match the given ID in all matchCols (with any=True, matching the first column in matchCols suffices)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
IDsMatching=[]
s=dfID.loc[ID,:]
for ID,row in dfID.iterrows():
match=True
for col in [col for col in row.index.values if col in matchCols]:
#if str(row[col])!=str(s[col]):
if row[col]!=s[col]:
match=False
break
else:
if any:
break
if match:
IDsMatching.append(ID)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
#except:
# logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return sorted(IDsMatching)
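# Usage sketch for getIDsFromID (hedged: dfID as returned by getDfFromODI; the example ID is the
# function's default and may not exist in your ODI):
#
#   IDs = getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',
#                      dfID=dfID,
#                      matchCols=['B','C1','C2','C3','C4','C5','D'])
#   # IDs: all channels of the same segment (AL_S, SB_S, STAT_S, ...), sorted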
def getLDSResVecDf(
ID # ResVec-Defining-Channel; i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.AL_S
,dfID
,TCsLDSResDf
,matchCols # i.e. ['B','C1','C2','C3','C4','C5','C6','D'] for Segs; i.e. ['B','C','D'] for Drks
):
"""
returns a df with LDSResChannels as columns (AL_S, ...); derived by Filtering columns from TCsLDSResDf and renaming them
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
IDs=getIDsFromID(ID=ID,dfID=dfID,matchCols=matchCols)
dfFiltered=TCsLDSResDf.filter(items=IDs)
colDct={}
for col in dfFiltered.columns:
m=re.search(pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except:
logger.error("{0:s}".format(logStr))
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
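# Usage sketch for getLDSResVecDf (hedged: TCsdfLDSRes1 as produced by AppLog.getTCs / extractTCsToH5s):
#
#   dfSegResVec = getLDSResVecDf(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S',
#                                dfID=dfID,
#                                TCsLDSResDf=TCsdfLDSRes1,
#                                matchCols=['B','C1','C2','C3','C4','C5','C6','D'])
#   # dfSegResVec columns: AL_S, SB_S, ZHKNR_S, ... (one column per result channel of the segment)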
def fGetFirstAndLastValidIdx(df):
"""
    returns (tFirst,tLast): the earliest first_valid_index and the latest last_valid_index over all columns of df
"""
for idx,col in enumerate(df.columns):
tF=df[col].first_valid_index()
tL=df[col].last_valid_index()
if idx==0:
tFirst=tF
tLast=tL
else:
if tF < tFirst:
tFirst=tF
if tL > tLast:
tLast=tL
return (tFirst,tLast)
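# Usage sketch for fGetFirstAndLastValidIdx (hedged: works on any df with a time-like index):
#
#   tFirst, tLast = fGetFirstAndLastValidIdx(TCsdfLDSRes1)
#   TCsdfLDSRes1 = TCsdfLDSRes1.loc[tFirst:tLast]   # trim leading/trailing all-NaN ranges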
def fGetIDSets(
dfID
,divNr #'7'
,pipelineNrLst #['43','44']
    ,fctIn=None # function of an ID returning False if the ID shall not be part of the set after all
    ):
    # returns dct: key: name of an ID set; value: the corresponding IDs
IDSets={}
IDs=[]
for ID in sorted(dfID.index.unique()):
m=re.search(pID,ID)
if m != None:
C1= m.group('C1')
C2= m.group('C2')
C3= m.group('C3')
C4= m.group('C4')
C5= m.group('C5')
if C1 in [divNr] and C3 in pipelineNrLst: # u.a. SEG ErgVecs
IDs.append(ID)
elif C2 in [divNr] and C4 in pipelineNrLst:
IDs.append(ID)
elif C3 in [divNr] and C5 in pipelineNrLst: # FT, PTI, etc.
IDs.append(ID)
if fctIn != None:
IDs=[ID for ID in IDs if fctIn(ID)]
IDSets['IDs']=IDs
IDsAlarm=[ID for ID in IDs if re.search(pID,ID).group('E') == 'AL_S']
IDSets['IDsAlarm']=IDsAlarm
IDsAlarmSEG=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsAlarmSEG']=IDsAlarmSEG
IDsAlarmDruck=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsAlarmDruck']=IDsAlarmDruck
IDsStat=[ID for ID in IDs if re.search(pID,ID).group('E') == 'STAT_S']
IDSets['IDsStat']=IDsStat
IDsStatSEG=[ID for ID in IDsStat if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsStatSEG']=IDsStatSEG
IDsStatDruck=[ID for ID in IDsStat if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsStatDruck']=IDsStatDruck
###
IDsSb=[ID for ID in IDs if re.search(pID,ID).group('E') == 'SB_S']
IDSets['IDsSb']=IDsSb
IDsSbSEG=[ID for ID in IDsSb if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsSbSEG']=IDsSbSEG
IDsSbDruck=[ID for ID in IDsSb if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsSbDruck']=IDsSbDruck
###
IDsZHK=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZHKNR_S']
IDSets['IDsZHK']=IDsZHK
IDsZHKSEG=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsZHKSEG']=IDsZHKSEG
IDsZHKDruck=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsZHKDruck']=IDsZHKDruck
IDsFT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'FT']
IDSets['IDsFT']=IDsFT
IDsPT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'PTI']
IDSets['IDsPT']=IDsPT
IDsPT_BCIND=[ID for ID in IDs if re.search(pID,ID).group('C5') == 'PTI' and re.search(pID,ID).group('E') == 'BCIND_S' ]
IDSets['IDsPT_BCIND']=IDsPT_BCIND
### Schieber
IDsZUST=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZUST']
IDsZUST=sorted(IDsZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDsZUST']=IDsZUST
IDs_3S_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == '3S_FBG_ESCHIEBER']
IDs_3S_XYZ_ESCHIEBER=sorted(IDs_3S_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C6'))
IDSets['IDs_3S_XYZ_ESCHIEBER']=IDs_3S_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == 'FBG_ESCHIEBER']
IDs_XYZ_ESCHIEBER=sorted(IDs_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C5')) #
IDSets['IDs_XYZ_ESCHIEBER']=IDs_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER_Ohne_ZUST=[ID for ID in IDs_XYZ_ESCHIEBER if re.search(pID,ID).group('E') != 'ZUST']
IDs_XYZ_ESCHIEBER_Ohne_ZUST=sorted(IDs_XYZ_ESCHIEBER_Ohne_ZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDs_XYZ_ESCHIEBER_Ohne_ZUST']=IDs_XYZ_ESCHIEBER_Ohne_ZUST
IDsSchieberAlle=IDsZUST+IDs_XYZ_ESCHIEBER_Ohne_ZUST+IDs_3S_XYZ_ESCHIEBER
IDSets['IDsSchieberAlle']=IDsSchieberAlle
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlle if re.search('LAEUFT$',ID) == None]
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlleOhneLAEUFT if re.search('LAEUFT_NICHT$',ID) == None]
IDSets['IDsSchieberAlleOhneLAEUFT']=IDsSchieberAlleOhneLAEUFT
return IDSets
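# Usage sketch for fGetIDSets (hedged: the divNr / pipelineNrLst values are assumptions):
#
#   IDSets = fGetIDSets(dfID, divNr='7', pipelineNrLst=['43','44'])
#   IDSets['IDsAlarmSEG']    # AL_S channels of the segments
#   IDSets['IDsAlarmDruck']  # AL_S channels of the pressure (PTI) measurements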
h5KeySep='/'
def fValueFct(x):
return pd.to_numeric(x,errors='ignore',downcast='float')
class AppLog():
"""
SIR 3S App Log (SQC Log)
Maintains a H5-File.
Existing H5-File will be deleted (if not initialized with h5File=...).
H5-Keys are:
* init
* lookUpDf
* lookUpDfZips (if initialized with zip7Files=...)
        * Logfilenames prefixed by Log without extension
Attributes:
* h5File
* lookUpDf
zipName
logName
                FirstTime (ScenTime - not #LogTime)
                LastTime (ScenTime - not #LogTime)
* lookUpDfZips
"""
    TCsdfOPCFill=False # if True, NaNs in TCsdfOPC are filled; default: False
@classmethod
def getTCsFromDf(cls,df,dfID=pd.DataFrame(),TCsdfOPCFill=TCsdfOPCFill):
"""
returns several TC-dfs from df
        dfs are processed as in extractTCsToH5s; see there
        Args:
            * df: a df with Log-Data
                * columns: ['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']
            * dfID
                * index: ID
                * required only if the IDs shall be split into Res1 and Res2
            * TCsdfOPCFill: if True, NaNs in TCsdfOPC are filled (default: False)
Time curve dfs: cols:
* Time (TCsdfOPC: ProcessTime, other: ScenTime)
* ID
* Value
Time curve dfs:
* TCsdfOPC
* TCsSirCalc
* TCsLDSIn
* TCsLDSRes (dfID empty) or TCsLDSRes1, TCsLDSRes2
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if not dfID.empty:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
if not dfID.empty:
df=df.merge(dfID,how='left',left_on='ID',right_index=True,suffixes=('','_r'))
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=df[(df['SubSystem'].str.contains('^OPC'))
                        ### & ~(df['Value'].isnull()) # superfluous if df already satisfies this
][['ProcessTime','ID','Value']].pivot_table(index='ProcessTime', columns='ID', values='Value',aggfunc='last')
if TCsdfOPCFill:
for col in TCsdfOPC.columns:
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='ffill')
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='bfill')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=df[(df['SubSystem'].str.contains('^SirCalc')) | (df['SubSystem'].str.contains('^RTTM')) ][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^<-'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
if not dfID.empty:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_SEG_INFO'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_DRUCK'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
else:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def __init__(self,logFile=None,zip7File=None,h5File=None,h5FileName=None,readWithDictReader=False,nRows=None,readWindowsLog=False):
"""
        (re-)initialize
        logFile:
            is read and stored in the H5-File
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5-File
        zipFile:
            the 1st logFile is read and stored in the H5-File
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5-File
            initializing with a zipFile is identical to initializing with a logFile if logFile is the 1st logFile of the zip
        after addZip7File(zip7File) - possibly repeated for several zips:
            data can be read with self.get(...) (returns 1 df)
            data can be read with self.getTCs(...) (returns several dfs in TC form)
            data can be read with self.getTCsSpecified(...) (returns 1 df in TC form)
            data in TC form can be extracted into separate H5s with self.extractTCsToH5s(...)
            with self.getTCsFromH5s(...) the TCs can be read back
            === addZip7File(zip7File) - possibly repeated - and extractTCsToH5s(...) are part of a 7Zip preprocessing step before the actual analysis ===
        h5File:
            the lookUp-dfs are read from the H5-File
            the TC-H5 filenames belonging to the H5-File are set
            the TC-H5-Files are neither checked for existence nor read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
self.lookUpDf=pd.DataFrame()
self.lookUpDfZips=pd.DataFrame()
try:
if logFile != None and zip7File != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'3 Files (logFile and zip7File and h5File) specified.'))
elif logFile != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and zip7File) specified.'))
elif logFile != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and h5File) specified.'))
elif h5File != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (h5File and zip7File) specified.'))
elif logFile != None:
self.__initlogFile(logFile,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif zip7File != None:
self.__initzip7File(zip7File,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif h5File != None:
self.__initWithH5File(h5File)
else:
logger.debug("{0:s}{1:s}".format(logStr,'No File (logFile XOR zip7File XOR h5File) specified.'))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initlogFile(self,logFile,h5FileName=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with logFile
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn logFile nicht existiert ...
if not os.path.exists(logFile):
logger.debug("{0:s}logFile {1:s} not existing.".format(logStr,logFile))
else:
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
self.__initH5File(logFile,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initH5File(self,h5File,df,h5FileName=None):
"""
creates self.h5File and writes 'init'-Key Logfile df to it
Args:
* h5File: name of logFile or zip7File; the Dir is the Dir of the H5-File
* df
* h5FileName: the H5-FileName without Dir and Extension; if None (default), "Log ab ..." is used
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(h5File)
# H5-File
if h5FileName==None:
h5FileTail="Log ab {0:s}.h5".format(str(df['#LogTime'].min())).replace(':',' ').replace('-',' ')
else:
h5FileTail=h5FileName+'.h5'
self.h5File=os.path.join(h5FileHead,h5FileTail)
# wenn H5 existiert wird es geloescht
if os.path.exists(self.h5File):
os.remove(self.h5File)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileTail))
# init-Logfile schreiben
self.__toH5('init',df)
logger.debug("{0:s}'init'-Key Logfile done.".format(logStr))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initWithH5File(self,h5File,useRawHdfAPI=False):
"""
self.h5File=h5File
self.lookUpDf
self.lookUpDfZips
        the lookUp-dfs are read from the H5-File
        the TC-H5 filenames belonging to the H5-File are set if those H5-Files exist
        the TC-H5-Files are not read
        the CVD filename belonging to the H5-File is set if that H5-File exists
        that H5-File is not read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(h5File):
self.h5File=h5File
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=h5Store['lookUpDf']
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=h5Store['lookUpDfZips']
else:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=pd.read_hdf(self.h5File, key='lookUpDf')
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=pd.read_hdf(self.h5File, key='lookUpDfZips')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
#TC-H5s
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
h5FileOPC=name+TCPost+'OPC'+ext
h5FileSirCalc=name+TCPost+'SirCalc'+ext
h5FileLDSIn=name+TCPost+'LDSIn'+ext
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
h5FileLDSRes=name+TCPost+'LDSRes'+ext
if os.path.exists(h5FileOPC):
self.h5FileOPC=h5FileOPC
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileOPC))
if os.path.exists(h5FileSirCalc):
self.h5FileSirCalc=h5FileSirCalc
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileSirCalc))
if os.path.exists(h5FileLDSIn):
self.h5FileLDSIn=h5FileLDSIn
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSIn))
if os.path.exists(h5FileLDSRes):
self.h5FileLDSRes=h5FileLDSRes
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes))
if os.path.exists(h5FileLDSRes1):
self.h5FileLDSRes1=h5FileLDSRes1
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes1))
if os.path.exists(h5FileLDSRes2):
self.h5FileLDSRes2=h5FileLDSRes2
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes2))
h5FileCVD=name+'_'+'CVD'+ext
if os.path.exists(h5FileCVD):
self.h5FileCVD=h5FileCVD
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileCVD))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getInitDf(self,useRawHdfAPI=False):
"""
returns InitDf from H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=pd.DataFrame()
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'init' in h5KeysStripped:
df=h5Store['init']
else:
if 'init' in h5KeysStripped:
df=pd.read_hdf(self.h5File, key='init')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def __initzip7File(self,zip7File,h5FileName=None,nRows=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with zip7File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
zipFileDirname=os.path.dirname(zip7File)
logger.debug("{0:s}zipFileDirname: {1:s}".format(logStr,zipFileDirname))
aDfRead=False
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,len(allLogFiles)))
logger.debug("{0:s}getnames(): {1:s}.".format(logStr,str(allLogFiles)))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
logger.debug("{0:s}idx: {1:d} logFileNameInZip: {2:s}".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(zipFileDirname,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile) # logFileHead == dirname()
logger.debug("{0:s}idx: {1:d} logFileHead: {2:s} logFileTail: {3:s}".format(logStr,idx,logFileHead,logFileTail))
(name, ext)=os.path.splitext(logFile)
logger.debug("{0:s}idx: {1:d} name: {2:s} ext: {3:s}".format(logStr,idx,name,ext))
if logFileHead!='': # logFileHead == dirname()
if os.path.exists(logFileHead) and logFileHead not in extDirLstExistingLogged:
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert bereits.".format(logStr,idx,logFileHead))
extDirLstExistingLogged.append(logFileHead)
elif not os.path.exists(logFileHead):
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert noch nicht.".format(logStr,idx,logFileHead))
extDirLstTBDeleted.append(logFileHead)
# kein Logfile zu prozessieren ...
if ext == '':
continue
# Logfile prozessieren ...
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=zipFileDirname,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
aDfRead=True
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# wir wollen nur das 1. File lesen ...
if aDfRead:
break;
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
self.__initH5File(zip7File,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __toH5(self,key,df,useRawHdfAPI=False,updLookUpDf=False,logName='',zipName='',noDfStorage=False):
"""
write df with key to H5-File (if not noDfStorage)
Args:
* updLookUpDf: if True, self.lookUpDf is updated with
* zipName (the Zip of logFile)
* logName (the name of the logFile i.e. 20201113_0000004.log)
* FirstTime (the first ScenTime in df)
* LastTime (the last ScenTime in df)
            self.lookUpDf is not written to H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(self.h5File)
if not noDfStorage:
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
try:
h5Store.put(key,df)
except Exception as e:
logger.error("{0:s}Writing df with h5Key={1:s} to {2:s} FAILED!".format(logStr,key,h5FileTail))
raise e
else:
df.to_hdf(self.h5File, key=key)
logger.debug("{0:s}Writing df with h5Key={1:s} to {2:s} done.".format(logStr,key,h5FileTail))
if updLookUpDf:
s=df['ScenTime']#['#LogTime']
FirstTime=s.iloc[0]
LastTime=s.iloc[-1]
if self.lookUpDf.empty:
data={ 'zipName': [zipName]
,'logName': [logName]
,'FirstTime' : [FirstTime]
,'LastTime' : [LastTime]
}
self.lookUpDf = pd.DataFrame (data, columns = ['zipName','logName','FirstTime','LastTime'])
self.lookUpDf['zipName']=self.lookUpDf['zipName'].astype(str)
self.lookUpDf['logName']=self.lookUpDf['logName'].astype(str)
else:
data={ 'zipName': zipName
,'logName': logName
,'FirstTime' : FirstTime
,'LastTime' : LastTime
}
self.lookUpDf=self.lookUpDf.append(data,ignore_index=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __processALogFile(self,logFile=None,delimiter='\t',nRows=None,readWithDictReader=False,fValueFct=fValueFct,readWindowsLog=False):
"""
process logFile
Args:
* logFile: logFile to be processed
* nRows: number of logFile rows to be processed; default: None (:= all rows are processed); if readWithDictReader: last row is also processed
            * readWithDictReader: if True, csv.DictReader is used; default: False (:= pd.read_csv is used)
Returns:
* df: logFile processed to df
* converted:
* #LogTime: to datetime
* ProcessTime: to datetime
* Value: to float64
* ID,Direction,SubSystem,LogLevel,State,Remark: to str
* new:
* ScenTime datetime
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=None
try:
with open(logFile,'r') as f:
pass
(logFileHead,logFileTail)=os.path.split(logFile)
if readWithDictReader:
restkey='+'
with open(logFile,"r") as csvFile: # 1. Zeile enthaelt die Ueberschrift
reader = csv.DictReader(csvFile,delimiter=delimiter,restkey=restkey)
logger.debug("{0:s}{1:s} csv.DictReader reader processed.".format(logStr,logFileTail))
# If a row has more fields than fieldnames, the remaining data is put in a list and stored with the fieldname specified by restkey.
colNames=reader.fieldnames
dcts = [dct for dct in reader] # alle Zeilen lesen
logger.debug("{0:s}{1:s} csv.DictReader-Ergebnis processed.".format(logStr,logFileTail))
if nRows!=None:
dcts=dcts[0:nRows]+[dcts[-1]]
# nur die Spaltennamen werden als row-Spalten erzeugt
rows = [[dct[colName] for colName in colNames] for dct in dcts]
logger.debug("{0:s}{1:s} rows processed.".format(logStr,logFileTail))
# die "ueberfluessigen" Spalten an die letzte Spalte dranhaengen
for i, dct in enumerate(dcts):
if restkey in dct:
restValue=dct[restkey]
restValueStr = delimiter.join(restValue)
newValue=rows[i][-1]+delimiter+restValueStr
#logger.debug("{0:s}{1:s} restValueStr: {2:s} - Zeile {3:10d}: {4:s} - neuer Wert letzte Spalte: {5:s}.".format(logStr,logFileTail,restValueStr,i,str(rows[i]),newValue))
                            rows[i][-1]=newValue # newValue already starts with the previous last-column value
logger.debug("{0:s}{1:s} restkey processed.".format(logStr,logFileTail))
index=range(len(rows))
df = pd.DataFrame(rows,columns=colNames,index=index)
else:
if nRows==None:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False)
else:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False,nrows=nRows)
logger.debug("{0:s}{1:s} pd.DataFrame processed.".format(logStr,logFileTail))
#logger.debug("{0:s}df: {1:s}".format(logStr,str(df)))
#LogTime
df['#LogTime']=pd.to_datetime(df['#LogTime'],unit='ms',errors='coerce') # NaT
#ProcessTime
df['ProcessTime']=pd.to_datetime(df['ProcessTime'],unit='ms',errors='coerce') # NaT
logger.debug("{0:s}{1:s} col ProcessTime processed.".format(logStr,logFileTail))
#Value
df['Value']=df.Value.str.replace(',', '.') # Exception: Line: 1137: <class 'AttributeError'>: Can only use .str accessor with string values!
df['Value']=fValueFct(df['Value'].values) # df['ValueProcessed'].apply(fValueFct)
logger.debug("{0:s}{1:s} col Value processed.".format(logStr,logFileTail))
#Strings
for col in ['ID','Direction','SubSystem','LogLevel','State','Remark']:
df[col]=df[col].astype(str)
logger.debug("{0:s}{1:s} String-cols processed.".format(logStr,logFileTail))
            #1618249551621 STD CVD 1615442324000 p-p BEGIN_OF_NEW_CONTROL_VOLUME 6-10-SV1-RB~6-10-BID-RB NULL NULL # string identical in both cases (Linux and Windows)?
            #1618249551621 STD CVD <- 156 CV_ID
            ##ScenTime
            ## SubSystem Direction ProcessTime ID Value State Remark
            ## Linux ---
            ## 1615029280000 INF SQC Starting cycle for 2021-03-06 12:14:38.000
            ## 1615029280000 STD LDS MCL 1615029278000 Main cycle loop 06.03.2021 12:14:38.000 (ScenTime: date and time in plain text; the ProcessTime column holds the ScenTime!)
            ## Windows ---
            ## 1618256150711 STD SQC 1615457121000 Main cycle loop 11:05:21.000 (ScenTime time in plain text; the ProcessTime column holds the ScenTime!)
dfScenTime=df[df['ID']=='Main cycle loop'][['ProcessTime']]
dfScenTime.rename(columns={'ProcessTime':'ScenTime'},inplace=True)
df=df.join(dfScenTime)
df['ScenTime']=df['ScenTime'].fillna(method='ffill')
df['ScenTime']=df['ScenTime'].fillna(method='bfill')
if df['ScenTime'].isnull().values.all():
logger.debug("{0:s}Keine Zeile mit ID=='Main cycle loop' gefunden. ScenTime zu #LogTime gesetzt.".format(logStr))
                df['ScenTime']=df['#LogTime'] # if no row with ID=='Main cycle loop' was found, ScenTime is set to #LogTime
# finalisieren
df=df[['#LogTime','LogLevel','SubSystem','Direction','ProcessTime','ID','Value','ScenTime','State','Remark']]
logger.debug("{0:s}{1:s} processed with nRows: {2:s} (None if all).".format(logStr,logFileTail,str(nRows)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def rebuildLookUpDfZips(self,zip7Files,readWithDictReader=True,readWindowsLog=False):
"""
(re-)initialize with zip7Files
only persistent outcome is lookUpDfZips (Attribute and H5-Persistence)
        lookUpDf is changed but not H5-stored
        (Re-)Init with AppLog(h5File=...) after using rebuildLookUpDfZips to obtain the old lookUpDf
main Usage of rebuildLookUpDfZips is to determine which zip7Files to add by i.e.:
zip7FilesToAdd=lx.lookUpDfZips[~(lx.lookUpDfZips['LastTime']<timeStartAusschnitt) & ~(lx.lookUpDfZips['FirstTime']>timeEndAusschnitt)].index.to_list()
"""
#noDfStorage=False
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#self.__initzip7File(zip7File=zip7Files[0],h5FileName=h5FileName,nRows=1,readWithDictReader=True)
for zip7File in zip7Files:
logger.info("{0:s}addZip7File: {1:s}".format(logStr,zip7File))
self.addZip7File(zip7File,firstsAndLastsLogsOnly=True,nRows=1,readWithDictReader=readWithDictReader,noDfStorage=True,readWindowsLog=readWindowsLog)
logger.debug("{0:s}lookUpDf: {1:s}".format(logStr,self.lookUpDf.to_string()))
df=self.lookUpDf.groupby(by='zipName').agg(['min', 'max'])
logger.debug("{0:s}df: {1:s}".format(logStr,df.to_string()))
minTime=df.loc[:,('FirstTime','min')]
maxTime=df.loc[:,('LastTime','max')]
minFileNr=df.loc[:,('logName','min')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
maxFileNr=df.loc[:,('logName','max')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
s=(maxTime-minTime)/(maxFileNr-minFileNr)
lookUpDfZips=s.to_frame().rename(columns={0:'TimespanPerLog'})
lookUpDfZips['NumOfFiles']=maxFileNr-minFileNr
lookUpDfZips['FirstTime']=minTime
lookUpDfZips['LastTime']=maxTime
lookUpDfZips['minFileNr']=minFileNr
lookUpDfZips['maxFileNr']=maxFileNr
lookUpDfZips=lookUpDfZips[['FirstTime','LastTime','TimespanPerLog','NumOfFiles','minFileNr','maxFileNr']]
# lookUpDfZips schreiben
self.lookUpDfZips=lookUpDfZips
self.__toH5('lookUpDfZips',self.lookUpDfZips)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def addZip7File(self,zip7File,firstsAndLastsLogsOnly=False,nRows=None,readWithDictReader=False,noDfStorage=False,readWindowsLog=False):
"""
add zip7File
Args:
            * zip7File: zip file whose log files shall be added
            * Args for internal Usage:
                * firstsAndLastsLogsOnly (then True)
                * nRows (then 1)
                * readWithDictReader (then True)
                i.e. only the first and the last logs per zip are read, and of those only the 1st and the last row, using DictReader
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
logger.debug("{0:s}zip7FileHead (leer wenn zip7 im selben Verz.): {1:s} zip7FileTail: {2:s}.".format(logStr,zip7FileHead,zip7FileTail))
logger.info("{0:s}zip7File: {1:s} ...".format(logStr,zip7File))
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
if firstsAndLastsLogsOnly:
if idx not in [0,1,allLogFilesLen-2,allLogFilesLen-1]:
#logger.debug("{0:s}idx: {1:d} item: {2:s} NOT processed ...".format(logStr,idx,logFileNameInZip))
continue
logger.info("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
continue
# logFileNameInZip bezeichnet eine Datei
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
logger.debug("{0:s}Log: {1:s} wird extrahiert ... ".format(logStr,logFileTail))
import lzma
try:
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
except lzma.LZMAError:
logger.warning("{0:s}Log: {1:s} nicht erfolgreich extrahiert - continue ... ".format(logStr,logFileTail))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}Log: {1:s} wurde extrahiert. ".format(logStr,logFileTail))
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# ...
(name, ext)=os.path.splitext(logFileTail)
key='Log'+name
if zip7FileHead != '':
zipName=os.path.join(os.path.relpath(zip7FileHead),zip7FileTail)
else:
zipName=zip7FileTail
# df schreiben
self.__toH5(key,df,updLookUpDf=True,logName=logFileTail,zipName=zipName,noDfStorage=noDfStorage)#os.path.join(os.path.relpath(zip7FileHead),zip7FileTail))
# danach gleich lookUpDf schreiben ...
self.__toH5('lookUpDf',self.lookUpDf,noDfStorage=noDfStorage)
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getTotalLogTime(self):
"""
        Returns Tuple: firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal # gross log time, net log time, sum of all gaps between 2 log files (should equal gross minus net)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Inhalt der Logs
tdTotal=pd.Timedelta('0 Seconds')
tdBetweenFilesTotal=pd.Timedelta('0 Seconds')
for idx,(index,row) in enumerate(self.lookUpDf.iterrows()):
if idx > 0:
tdBetweenFiles=row["FirstTime"]-lastTime
tdBetweenFilesTotal=tdBetweenFilesTotal+tdBetweenFiles
if tdBetweenFiles > pd.Timedelta('0 second'):
if tdBetweenFiles > pd.Timedelta('1 second'):
logger.info("{:s}Zeitdifferenz: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
pass
if tdBetweenFiles < pd.Timedelta('0 second'):
if tdBetweenFiles < -pd.Timedelta('1 second'):
pass
logger.info("{:s}Zeitueberlappung > 1s: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
td=row["LastTime"]-row["FirstTime"]
if type(td) == pd.Timedelta:
tdTotal=tdTotal+td
else:
                    print(index) # error!
lastTime=row["LastTime"]
lastFile=row["logName"]
lastZip=row["zipName"]
firstTime=self.lookUpDf.iloc[0]["FirstTime"]
lastTime=self.lookUpDf.iloc[-1]["LastTime"]
tdTotalGross=lastTime-firstTime
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal
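    # Usage sketch for getTotalLogTime (hedged):
    #
    #   firstTime, lastTime, tdTotalGross, tdTotal, tdBetweenFilesTotal = lx.getTotalLogTime()
    #   # tdTotalGross - tdTotal should roughly equal tdBetweenFilesTotal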
def extractTCsToH5s(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill):
"""
        extracts TC-Data (and CVD-Data) from H5 to separate H5-Files (postfixes: _TCxxx.h5 and _CVD.h5)
        TCsdfOPCFill: if True, NaNs in TCsdfOPC are filled; default: False
        if timeStart != None: data is appended to existing .h5s; otherwise they are overwritten
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            # create _TCxxx.h5 (OPC, SirCalc, LDSIn, LDSRes1, LDSRes2 (,LDSRes)) and _CVD.h5
            # over all dfs in the H5 (taking timeStart and timeEnd into account)
            # read
            # determine the TC subset: 'ID','ProcessTime','ScenTime','SubSystem','Value','Direction'
            # rows with 'Value' isnull() are NOT read
            # i.e. with a logfile semantics that resets a value to (whatever) via NULL rows, the value would stay at the last non-NULL value in a stop-plot output ...
            # ... for now ...
            # build subsets: ['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2' (,'TCsdfLDSRes')]
            # ... NULLs (NaNs) arise from pivoting with index = Time: not every time (of the superset) has a value for every ID
            # write
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
self.h5FileOPC=name+TCPost+'OPC'+ext
self.h5FileSirCalc=name+TCPost+'SirCalc'+ext
self.h5FileLDSIn=name+TCPost+'LDSIn'+ext
if not dfID.empty:
# Attribute
self.h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
self.h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
# Komplement wird geloescht
h5FileLDSRes=name+TCPost+'LDSRes'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes):
os.remove(h5FileLDSRes)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes))
del self.h5FileLDSRes
except:
pass
else:
# Attribut
self.h5FileLDSRes=name+TCPost+'LDSRes'+ext
# Komplemente werden geloescht
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes1):
os.remove(h5FileLDSRes1)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes1))
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes2):
os.remove(h5FileLDSRes2)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes2))
del self.h5FileLDSRes1
del self.h5FileLDSRes2
except:
pass
self.h5FileCVD=name+'_'+'CVD'+ext
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysOPC=['TCsOPC'+x for x in h5KeysPost]
h5KeysSirCalc=['TCsSirCalc'+x for x in h5KeysPost]
h5KeysLDSIn=['TCsLDSIn'+x for x in h5KeysPost]
h5KeysLDSRes1=['TCsLDSRes1'+x for x in h5KeysPost]
h5KeysLDSRes2=['TCsLDSRes2'+x for x in h5KeysPost]
h5KeysLDSRes=['TCsLDSRes'+x for x in h5KeysPost]
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes,h5KeysCVD)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes,h5KeyCVD) in enumerate(h5KeysAll):
#H5-Write-Modus
if idx==0:
if timeStart!=None:
mode='a'
else:
mode='w'
else:
mode='a'
logger.info("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
# CVD -------------------------------------------------------------------------------------------------
dfCVD=df[df['SubSystem']=='CVD']
df=df[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
df['Value']=df['Value'].apply(lambda x: fTCCast(x))
df=df[~(df['Value'].isnull())]
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
logger.debug("{0:s}{1:s}".format(logStr,'Write ...'))
TCsdfOPC.to_hdf(self.h5FileOPC,h5KeyOPC, mode=mode)
TCsdfSirCalc.to_hdf(self.h5FileSirCalc,h5KeySirCalc, mode=mode)
TCsdfLDSIn.to_hdf(self.h5FileLDSIn,h5KeyLDSIn, mode=mode)
if not dfID.empty:
TCsdfLDSRes1.to_hdf(self.h5FileLDSRes1,h5KeyLDSRes1, mode=mode)
TCsdfLDSRes2.to_hdf(self.h5FileLDSRes2,h5KeyLDSRes2, mode=mode)
else:
TCsdfLDSRes.to_hdf(self.h5FileLDSRes,h5KeyLDSRes, mode=mode)
# ---
dfCVD.to_hdf(self.h5FileCVD,h5KeyCVD, mode=mode)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return
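    # Usage sketch for extractTCsToH5s (hedged: dfID as returned by getDfFromODI; the time window is optional):
    #
    #   lx.extractTCsToH5s(dfID=dfID)                               # full time range, TC-H5s overwritten
    #   lx.extractTCsToH5s(dfID=dfID, timeStart=t0, timeEnd=t1)     # appends the window [t0, t1] to existing TC-H5s
    #   # afterwards lx.h5FileOPC, lx.h5FileSirCalc, lx.h5FileLDSIn, lx.h5FileLDSRes1/2 and lx.h5FileCVD are set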
def shrinkH5File(self):
"""
        the dfs are deleted in the H5-File
        extractTCsToH5s ### MUST ### have been run before
        after shrinkH5File the actual data is no longer available in the master H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys()) # /Log20201216_0000001
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
for key in h5Keys:
if re.match('(^/Log)',key):
logger.debug("{0:s}key removed: {1:s}".format(logStr,str(key)))
h5Store.remove(key.replace(h5KeySep,''))
else:
logger.debug("{0:s}key NOT removed: {1:s}".format(logStr,str(key)))
with pd.HDFStore(self.h5File) as h5Store:
pass
shrinkCmd="ptrepack --chunkshape=auto --propindexes --complib=blosc "+self.h5File+" "+self.h5File+".Shrinked"
logger.debug("{0:s}shrinkCmd: {1:s}".format(logStr,shrinkCmd))
if os.path.exists(self.h5File+".Shrinked"):
os.remove(self.h5File+".Shrinked")
os.system(shrinkCmd)
os.remove(self.h5File)
os.rename(self.h5File+".Shrinked",self.h5File)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def get(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,useRawHdfAPI=False):
"""
returns df with filter_fct applied
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the start, or the file's end equals the start
            if timeEnd!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the end, or the file's start equals the end
            dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries (assign the result; the original call discarded it)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
for h5Key in h5Keys:
logger.debug("{0:s}Get (pd.HDFStore) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=h5Store[h5Key]
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
else:
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
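    # Usage sketch for get (hedged: the filter below is an arbitrary example):
    #
    #   df = lx.get(timeStart=pd.Timestamp('2021-04-20 10:00:00'),
    #               timeEnd=pd.Timestamp('2021-04-20 12:00:00'),
    #               filter_fct=lambda row: row['SubSystem'] == 'LDS')   # keep only LDS rows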
def getFromZips(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,readWithDictReader=False,readWindowsLog=False):
"""
returns df from Zips
        the data is read from the zips: extract the log, parse it, delete it again
        initialization must have been done with AppLog(zip7Files=...) since only then self.lookUpDfZips exists
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
timeStart=pd.Timestamp(timeStart)
timeEnd=pd.Timestamp(timeEnd)
# zips die prozessiert werden muessen
dfLookUpZips=self.lookUpDfZips
if timeStart!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
for index, row in dfLookUpZips.iterrows():
zip7File=index
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
dTime=timeStart-row['FirstTime']
nStart = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())
dTime=timeEnd-timeStart
nDelta = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())+1
nEnd=nStart+nDelta
logger.debug("{0:s}zip7File: {1:s}: Start: {2:d}/{3:07d} End: {4:d}/{5:07d}".format(logStr,zip7FileTail
,nStart,nStart+row['minFileNr']
,nStart+nDelta,nStart+row['minFileNr']+nDelta))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
idxEff=0
for idx,logFileNameInZip in enumerate(allLogFiles):
if idx < nStart-idxEff or idx > nEnd+idxEff:
continue
logger.debug("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
idxEff+=1
continue
# logFileNameInZip bezeichnet eine Datei
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                                # nothing to process ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
                                    # nothing to process ...
continue
else:
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
# ...
                            # delete it again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getTCs(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill,persistent=False,overwrite=True):
"""
        returns the TC dataframes
        the dfs are processed in the same way as in extractTCsToH5s; see there for details
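        return value (as implemented in the return statements at the end of this method):
            dfID non-empty -> (TCsdfOPC, TCsdfSirCalc, TCsdfLDSIn, TCsdfLDSRes1, TCsdfLDSRes2)
            dfID empty (default) -> (TCsdfOPC, TCsdfSirCalc, TCsdfLDSIn, TCsdfLDSRes1)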
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
            TCKeys=['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2a','TCsdfLDSRes2b','TCsdfLDSRes2c']
if persistent:
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
#logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if set(TCKeys) & set(h5KeysStripped) == set(TCKeys):
if not overwrite:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - return aus H5-File ...".format(logStr,str(TCKeys)))
                            TCsdfOPC=pd.read_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc=pd.read_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn=pd.read_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1=pd.read_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a=pd.read_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b=pd.read_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c=pd.read_hdf(self.h5File,key='TCsdfLDSRes2c')
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - sollen aber ueberschrieben werden ...".format(logStr,str(TCKeys)))
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren nicht (alle) ...".format(logStr,str(TCKeys)))
dfLookUpTimes=self.lookUpDf
            if timeStart!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the requested start, or the file's end coincides with the start
            if timeEnd!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the requested end, or the file's start coincides with the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
dfLst=[]
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
dfSingle=pd.read_hdf(self.h5File, key=h5Key)
dfSingle=dfSingle[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
dfSingle=dfSingle[~(dfSingle['Value'].isnull())]
dfLst.append(dfSingle)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
df=pd.concat(dfLst)
del dfLst
logger.debug("{0:s}{1:s}".format(logStr,'Concat finished. Filter & Pivot ...'))
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
if persistent:
logger.debug("{0:s}peristent: TCKeys {1:s} nach H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC.to_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc.to_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn.to_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1.to_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a.to_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b.to_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c.to_hdf(self.h5File,key='TCsdfLDSRes2c')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2#a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1
def getTCsFromH5s(self,timeStart=None,timeEnd=None, LDSResOnly=False, LDSResColsSpecified=None, LDSResTypeSpecified=None, timeShiftPair=None):
"""
returns several TC-dfs from TC-H5s:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
LDSResOnly:
TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfLDSRes
LDSResColsSpecified:
return in LDSRes df(s) only the specified cols
all cols are returned otherwise
LDSResTypeSpecified:
return TCsdfLDSRes1 (SEG) for 'SEG' or TCsdfLDSRes2 (Druck) for 'Druck'
both are returned otherwise
        timeShiftPair: (period, freq), e.g. (1,'H'); if not None the index is shifted
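        example call (a sketch; 'lx' stands for an instance of this class and the dates are placeholders):
            dfs = lx.getTCsFromH5s(timeStart='2021-04-01', timeEnd='2021-04-02',
                                   LDSResOnly=True, LDSResTypeSpecified='SEG', timeShiftPair=(1,'H'))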
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
try:
self.h5FileLDSRes1
Res2=True
except:
Res2=False
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if Res2:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
dfLookUpTimes=self.lookUpDf
            if timeStart!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # ends after the requested start, or the file's end coincides with the start
            if timeEnd!=None:
                dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # begins before the requested end, or the file's start coincides with the end
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
            dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
h5KeysOPC=['TCsOPC'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysSirCalc=['TCsSirCalc'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSIn=['TCsLDSIn'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes1=['TCsLDSRes1'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes2=['TCsLDSRes2'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes=['TCsLDSRes'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes) in enumerate(h5KeysAll):
if not LDSResOnly:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=pd.read_hdf(self.h5FileOPC,h5KeyOPC)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=pd.read_hdf(self.h5FileSirCalc,h5KeySirCalc)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=pd.read_hdf(self.h5FileLDSIn,h5KeyLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=pd.read_hdf(self.h5FileLDSRes1,h5KeyLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=pd.read_hdf(self.h5FileLDSRes2,h5KeyLDSRes2)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=pd.read_hdf(self.h5FileLDSRes,h5KeyLDSRes)
if LDSResColsSpecified != None:
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s} {2:s}".format(logStr,'TCsdfLDSRes1 Filter ...',str(LDSResColsSpecified)))
TCsdfLDSRes1=TCsdfLDSRes1.filter(items=LDSResColsSpecified)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 Filter ...'))
TCsdfLDSRes2=TCsdfLDSRes2.filter(items=LDSResColsSpecified)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes Filter ...'))
TCsdfLDSRes=TCsdfLDSRes.filter(items=LDSResColsSpecified)
if idx==0:
if not LDSResOnly:
TCsdfOPCLst=[]
TCsdfSirCalcLst=[]
TCsdfLDSInLst=[]
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst=[]
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst=[]
else:
TCsdfLDSResLst=[]
#logger.debug("{0:s}Append ...".format(logStr))
if not LDSResOnly:
TCsdfOPCLst.append(TCsdfOPC)
TCsdfSirCalcLst.append(TCsdfSirCalc)
TCsdfLDSInLst.append(TCsdfLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst.append(TCsdfLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst.append(TCsdfLDSRes2)
else:
TCsdfLDSResLst.append(TCsdfLDSRes)
logger.debug("{0:s}Concat ...".format(logStr))
if not LDSResOnly:
TCsdfOPC=pd.concat(TCsdfOPCLst)
TCsdfSirCalc=pd.concat(TCsdfSirCalcLst)
TCsdfLDSIn=pd.concat(TCsdfLDSInLst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
logger.debug("{0:s}timeShift TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn by {1:d} {2:s} ...".format(logStr,period,freq))
for df in TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn:
df.index=df.index.shift(period,freq=freq)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1=pd.concat(TCsdfLDSRes1Lst)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2=pd.concat(TCsdfLDSRes2Lst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
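                    # pandas DatetimeIndex.shift(period, freq) moves every timestamp by period*freq,
                    # e.g. timeShiftPair=(1,'H') shifts the whole result one hour forward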
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#for df in TCsdfLDSRes1:
logger.debug("{:s}timeShift LDSRes1 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes1.index[0],TCsdfLDSRes1.index[-1]))
TCsdfLDSRes1.index=TCsdfLDSRes1.index.shift(period,freq=freq)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#for df in TCsdfLDSRes2:
logger.debug("{:s}timeShift LDSRes2 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes2.index[0],TCsdfLDSRes2.index[-1]))
TCsdfLDSRes2.index=TCsdfLDSRes2.index.shift(period,freq=freq)
else:
                TCsdfLDSRes=pd.concat(TCsdfLDSResLst)
from bs4 import BeautifulSoup
import chardet
from datetime import datetime
import json
import lxml
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from serpapi import GoogleSearch
import statistics
import re
import requests
import time
from a0001_admin import clean_dataframe
from a0001_admin import retrieve_path
from a0001_admin import write_paths
from a0001_admin import work_completed
from a0001_admin import work_to_do
from query_pubs import query_pubs
from find_lat_lon import findLatLong
def aggregate_info(dataset):
"""
Save a .csv
"""
# write paths
write_paths()
# acquire information
if 'nsf' in dataset: df = acquire_nsf(dataset)
elif 'nih' in dataset: df = acquire_nih(dataset)
elif 'clinical' in dataset:
df = acquire_clinical(dataset)
list_clinical_trials(dataset)
elif 'patent' in dataset: df = acquire_patent(dataset)
elif 'pub' in dataset: df = acquire_pub(dataset)
# format and co-register fields of datasets
df = coregister(dataset)
# geolocate
df = geolocate(dataset)
# summarize
df = summarize(dataset)
# list unique
df = list_unique(dataset)
def acquire_clinical(dataset):
"""
from downloaded clinical data, aggregate
"""
name = 'acquire_clinical'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
# remove out of status trials and resave over acquired file
status_drop = ['Withdrawn', 'Terminated', 'Suspended']
status_drop.append('Temporarily not available')
status_drop.append('Unknown status')
for status in status_drop:
df = df[(df['Status'] != status)]
df = clean_dataframe(df)
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('Clinical df = ')
print(df)
return(df)
def acquire_downloaded(dataset):
"""
aggregate all files downloaded and saved in user provided
"""
df = pd.DataFrame()
path_term = dataset + '_downloaded'
path_src = os.path.join(retrieve_path(path_term))
for file in os.listdir(path_src):
file_src = os.path.join(path_src, file)
print('file_src = ' + str(file_src))
try:
df_src = pd.read_csv(file_src)
except:
with open(file_src, 'rb') as file:
print(chardet.detect(file.read()))
encodings = ['ISO-8859-1', 'unicode_escape', 'utf-8']
            for encoding in encodings:
                try:
                    df_src = pd.read_csv(file_src, encoding=encoding)
                    break
                except Exception:
                    continue
df = df.append(df_src)
df = df.drop_duplicates()
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
print('file_dst = ' + file_dst )
df.to_csv(file_dst)
return(df)
def acquire_nsf(dataset):
"""
aggregate all files in user provided into a single csv
"""
name = 'acquire_nsf'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('NSF df = ')
print(df)
return(df)
def acquire_nih(dataset):
"""
from downloaded nih data, aggregate
"""
name = 'acquire_nih'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('NIH df = ')
print(df)
return(df)
def acquire_patent():
"""
"""
df = pd.DataFrame()
return(df)
def acquire_pub(dataset):
"""
"""
df = pd.DataFrame()
query_pubs(dataset)
return(df)
def coregister(dataset):
"""
add reference value for year and value
"""
try:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
df = clean_dataframe(df)
except:
df = pd.DataFrame()
return(df)
if 'nsf' in dataset: df = coregister_nsf(dataset, df)
if 'nih' in dataset: df = coregister_nih(dataset, df)
if 'clinical' in dataset: df = coregister_clinical(dataset, df)
else: return(df)
return(df)
def coregister_clinical(dataset, df):
"""
add year and value as enrollment
"""
print('df = ')
print(df)
name = 'coregister_clinical'
if work_to_do(name):
work_completed(name, 0)
years = []
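        # 'Start Date' values are expected to end with the year, e.g. (illustrative)
        # '"March 15, 2019"' -> '2019'; values that cannot be parsed fall back to year 0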
for date in list(df['Start Date']):
print('date = ')
print(date)
try:
date = date.replace('"', '')
date_split = date.split(' ')
year = date_split[-1]
except:
year = 0
years.append(year)
values = []
for item in list(df['Enrollment']):
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def coregister_nih(dataset, df):
"""
"""
print('df = ')
print(df)
name = 'coregister_nih'
if work_to_do(name):
work_completed(name, 0)
years = []
for date in list(df['Fiscal Year']):
year = date
years.append(year)
values = []
for item in list(df['Direct Cost IC']):
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def coregister_nsf(dataset, df):
"""
"""
print('df = ')
print(df)
name = 'coregister_nsf'
if work_to_do(name):
work_completed(name, 0)
years = []
for date in list(df['StartDate']):
date_split = date.split('/')
year = date_split[-1]
years.append(year)
values = []
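        # 'AwardedAmountToDate' strings such as (illustrative) '"$1,234,567.00"' are stripped of
        # '$', '"' and ',' below before being converted to float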
for item in list(df['AwardedAmountToDate']):
item = item.replace('$', '')
item = item.replace('"', '')
item = item.replace(',', '')
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def geolocate(dataset):
"""
"""
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
df = clean_dataframe(df)
if 'nsf' in dataset:
name = 'geolocate_nsf'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_nsf(dataset, df)
work_completed(name, 1)
elif 'nih' in dataset:
name = 'geolocate_nih'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_nih(dataset, df)
work_completed(name, 1)
elif 'clinical' in dataset:
name = 'geolocate_clinical'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_clinical(dataset, df)
work_completed(name, 1)
else:
        df = pd.DataFrame()
# -*- coding: utf-8 -*-
# @Time : 09.04.21 09:54
# @Author : sing_sd
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import src.common_functions as cf
import csv
import ais
from datetime import datetime, timedelta, timezone
import re
vb_dir = os.path.dirname(__file__)
data_dir = os.path.join(vb_dir, "resources/")
headers = ['x', 'y', 'cog', 'sog', 'time', 'mmsi', "nav_status", "ship_type", "destination"]
plt.rcParams.update({'font.size': 12})
def main():
# data = generate_short_data(data_len=100000)
# filename = 'ais_data_rostock_2020.csv' # 'ais_data_rostock_2016.csv'
# generate_processed_data(filename)
filename = "ais_data_rostock_2020_processed.csv" # "ais_data_rostock_2016_processed.csv"
plot_data(filename)
# generate_rostock_gedsar_dataset(filename)
# decode_data()
def plot_data(filename):
mpl.rcParams['agg.path.chunksize'] = 10000
fig, axs = plt.subplots(1, 1)
fig.set_size_inches([8, 6])
plt.pause(0.0001)
with open(data_dir + filename, "r") as f:
print("start")
data = pd.read_csv(f)
print("data loaded")
data = data.astype({'x': 'float', 'y': 'float', 'cog': 'float', 'sog': 'float', 'time': 'float', 'mmsi': 'int',
'nav_status': 'float', 'ship_type': 'float', 'destination': 'str'})
print("Data loaded, total messages = ", len(data))
WINDOW = [11.5, 54.2, 12.5, 54.5]
SOG_LIMIT = [0, 30]
NAV_STATUS = 0.0
SHIP_TYPES = [0, 90]
idx = cf.get_filtered_data_idx(data, WINDOW, SOG_LIMIT, NAV_STATUS, SHIP_TYPES)
data = data.iloc[idx, :]
data = data.reset_index(drop=True)
print("Data loaded, total messages within window = ", len(data))
# axs.plot(data.iloc[:, 0], data.iloc[:, 1], 'b.', markersize=0.1, alpha=0.5) #, linestyle="solid", linewidth=0.1, alpha=0.5
SAMPLING_TIME = 60
for mmsi in data.mmsi.unique():
data_mmsi = data.iloc[np.array(data['mmsi'] == mmsi), :]
data_mmsi = data_mmsi.reset_index(drop=True)
# nr_data = int(np.ceil((np.array(data_mmsi.iloc[-1]['time']) - data_mmsi.iloc[0]['time']) / SAMPLING_TIME) + 1)
overall_data = np.full(shape=(5000 * len(data_mmsi), 2), fill_value=np.nan)
data_mmsi['time_idx'] = data_mmsi.iloc[0]['time']
data_mmsi['time_idx'] = np.ceil((data_mmsi['time'] - data_mmsi['time_idx']) / SAMPLING_TIME)
            overall_data[np.array(data_mmsi['time_idx'], dtype=int), 0:2] = np.array(data_mmsi[['x', 'y']])
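            # slots without a message stay NaN, so matplotlib breaks the line there instead of
            # drawing a segment across the gap between two transmissions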
axs.plot(overall_data[:, 0], overall_data[:, 1],
linestyle="-", color="blue", linewidth=0.3, alpha=5)
plt.pause(0.0001)
axs.set_xlabel('Longitude [deg]')
axs.set_ylabel('Latitude [deg]')
axs.set_xlim(xmin=11.5, xmax=12.5)
axs.set_ylim(ymin=54.2, ymax=54.5)
plt.pause(0.001)
plt.savefig("./resources/dataset2020.png")
plt.savefig("./resources/dataset2020.pdf")
plt.show()
def generate_processed_data(filename):
with open(data_dir + filename, "r") as f:
print("start")
data_pd = pd.read_csv(f)
print("data loaded")
data_pd = data_pd.astype({'x': 'float', 'y': 'float', 'cog': 'float', 'sog': 'float', 'time': 'float', 'mmsi': 'int',
'nav_status': 'float', 'ship_type': 'str', 'destination': 'str'})
# data = np.array(data_pd)
i = 1
total= len(data_pd.mmsi.unique())
for mmsi in data_pd.mmsi.unique():
idx = data_pd.iloc[:, 5] == mmsi
data_pd.iloc[idx, -1] = "U"
# for ship types
value = np.unique(data_pd["ship_type"].loc[idx])
if len(value) > 1:
value = value[value != "nan"]
data_pd.iloc[idx, -2] = value[0]
else:
data_pd.iloc[idx, 0] = -1 # delete those rows that does not have ship type by putting x = np.nan
print(i, " out of ", total)
i += 1
data_pd = data_pd[data_pd.x > 0]
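        # rows flagged above with x = -1 (no ship type found for the mmsi) are dropped here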
if sum(data_pd.iloc[:, -1] == "nan") + sum(data_pd.iloc[:, -2] == "nan")> 0:
print("there are nan values")
exit(0)
data_pd["ship_type"] = data_pd["ship_type"].astype("float64")
data_pd.to_csv(data_dir + "ais_data_rostock_2020_processed.csv", index=False)
# plot_graph(data)
def generate_short_data(data_len=10000):
data = pd.DataFrame(columns=headers)
data = data.astype({'x': 'float', 'y': 'float', 'cog': 'float', 'sog': 'float', 'time': 'float', 'mmsi': 'int',
'nav_status': 'float', 'ship_type': 'str', 'destination': 'str'})
# float helps in interpolation of these features
try:
with open(data_dir + 'ais_data_rostock_2016.csv', "r") as my_csv:
reader = csv.reader(my_csv)
print("first row", next(reader))
for i in range(data_len):
try:
next_row = next(reader)
data = data.append(pd.Series(next_row, index=data.columns), ignore_index=True)
except Exception as e:
print(str(e))
data.to_csv(data_dir + 'ais_data_rostock_2019_short.csv', index=False)
exit(0)
# data = genfromtxt(data_dir+'ais_data_rostock_2019.csv', delimiter=',')
data.to_csv(data_dir + 'ais_data_rostock_2016_short.csv', index=False)
# np.savetxt(data_dir+'ais_data_rostock_2019_short.csv', data, delimiter=',')
except Exception as e:
print(str(e))
return data
def generate_rostock_gedsar_dataset(filename):
fig, axs = plt.subplots(1, 1)
fig.set_size_inches([8, 6])
plt.pause(0.0001)
with open(data_dir + filename, "r") as f:
print("start")
data = pd.read_csv(f)
print("data loaded")
data = data.astype({'x': 'float', 'y': 'float', 'cog': 'float', 'sog': 'float', 'time': 'float', 'mmsi': 'int',
'nav_status': 'float', 'ship_type': 'float', 'destination': 'str'})
WINDOW = [11, 54, 13, 56]
SOG_LIMIT = [0, 30]
NAV_STATUS = 0.0
SHIP_TYPES = [60, 61]
idx = cf.get_filtered_data_idx(data, WINDOW, SOG_LIMIT, NAV_STATUS, SHIP_TYPES)
data = data.iloc[idx, :]
data_rg = pd.DataFrame(columns=data.columns)
filename = "ais_data_rostock_gedsar_2016.csv"
print("Data loaded, total messages within window = ", len(data))
for mmsi in data.mmsi.unique():
if mmsi in [219000479,218780000]:
data_mmsi = data.iloc[np.array(data['mmsi'] == mmsi), :]
data_mmsi = data_mmsi.reset_index(drop=True)
data_rg = pd.concat([data_rg,data_mmsi], ignore_index=True)
data_rg.to_csv(data_dir+filename, index=False)
plt.plot(data_rg["x"], data_rg["y"])
plt.pause(0.0001)
plt.show()
def decode_data():
WINDOW = (11, 54, 13, 56)
np.random.seed(10)
# names = [i for i in range(20)] # chnage .. when using other input files
headers = ['x', 'y', 'cog', 'sog', 'time', 'mmsi', "nav_status", "ship_type", "destination"]
data = pd.DataFrame(columns=headers)
data = data.astype({'x': 'float', 'y': 'float', 'cog': 'float', 'sog': 'float', 'time': 'float', 'mmsi': 'int',
'nav_status': 'float', 'ship_type': 'float',
'destination': 'str'}) # float helps in interpolation of these features
filename = 'ais_data_rostock_2020.csv'
data.to_csv(filename, index=False)
# insert a dummy row
to_append = [0, 0, 0, 0, 0, 0, 0, 0, 0]
data = data.append(pd.Series(to_append, index=data.columns), ignore_index=True)
txt_files = sorted(os.listdir(data_dir+"/AISHUB2020/"))
for file in txt_files:
with open(data_dir+"/AISHUB2020/"+file, "r") as f:
aismsg = None
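            # each line is expected to look like "<timestamp>\t<NMEA sentence>", e.g. (illustrative):
            # 1583020800\t!AIVDM,1,1,,A,13u?etPv2;0n:dDPwUM1U1Cb069D,0*23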
for line_num, i_line in enumerate(f.readlines()): # [:3000] f.readlines()
try:
splitted_line = i_line.split('\t')
ais_timestamp = splitted_line[0]
nmea_msg_split = splitted_line[1].split(",")
if nmea_msg_split[1] == "2":
if nmea_msg_split[2] == "1":
multi_line_nmea = nmea_msg_split[5]
if nmea_msg_split[2] == "2":
multi_line_nmea += nmea_msg_split[5]
# print(multi_line_nmea)
aismsg = ais.decode(multi_line_nmea, 2)
# print(aismsg)
multi_line_nmea = ""
else:
aismsg = ais.decode(nmea_msg_split[5], 0)
                    if aismsg is not None and (aismsg['id'] in [1, 2, 3, 5]): # or aismsg['id'] == 18 or aismsg['id'] == 19
# if aismsg["mmsi"] == 219423000: #244239000: # getting data for a single trajectory
if aismsg['id'] in [1, 2, 3]:
if not ((aismsg['x'] < WINDOW[0]) or (aismsg['y'] < WINDOW[1]) or (aismsg['x'] > WINDOW[2]) or (
aismsg['y'] > WINDOW[3])): # aismsg['sog'] < 6 or (aismsg['sog'] > 50)
to_append = [aismsg['x'], aismsg['y'], aismsg['cog'], aismsg['sog'], ais_timestamp,
aismsg['mmsi'], aismsg["nav_status"], np.nan,
np.nan] # class_name = nmea_msg_split[4]
                                data.iloc[0] = pd.Series(to_append, index=data.columns)
#!/usr/bin/env python3
import argparse
import collections
import copy
import datetime
import functools
import glob
import json
import logging
import math
import operator
import os
import os.path
import re
import sys
import typing
import warnings
import matplotlib
import matplotlib.cm
import matplotlib.dates
import matplotlib.pyplot
import matplotlib.ticker
import networkx
import numpy
import pandas
import tabulate
import tqdm
import rows.console
import rows.load
import rows.location_finder
import rows.model.area
import rows.model.carer
import rows.model.datetime
import rows.model.historical_visit
import rows.model.history
import rows.model.json
import rows.model.location
import rows.model.metadata
import rows.model.past_visit
import rows.model.problem
import rows.model.rest
import rows.model.schedule
import rows.model.service_user
import rows.model.visit
import rows.parser
import rows.plot
import rows.routing_server
import rows.settings
import rows.sql_data_source
def handle_exception(exc_type, exc_value, exc_traceback):
"""Logs uncaught exceptions"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
__COMMAND = 'command'
__PULL_COMMAND = 'pull'
__INFO_COMMAND = 'info'
__SHOW_WORKING_HOURS_COMMAND = 'show-working-hours'
__COMPARE_BOX_PLOTS_COMMAND = 'compare-box-plots'
__COMPARE_DISTANCE_COMMAND = 'compare-distance'
__COMPARE_WORKLOAD_COMMAND = 'compare-workload'
__COMPARE_QUALITY_COMMAND = 'compare-quality'
__COMPARE_COST_COMMAND = 'compare-cost'
__CONTRAST_WORKLOAD_COMMAND = 'contrast-workload'
__COMPARE_PREDICTION_ERROR_COMMAND = 'compare-prediction-error'
__COMPARE_BENCHMARK_COMMAND = 'compare-benchmark'
__COMPARE_BENCHMARK_TABLE_COMMAND = 'compare-benchmark-table'
__COMPARE_LITERATURE_TABLE_COMMAND = 'compare-literature-table'
__COMPARE_THIRD_STAGE_PLOT_COMMAND = 'compare-third-stage-plot'
__COMPARE_THIRD_STAGE_TABLE_COMMAND = 'compare-third-stage-table'
__COMPARE_THIRD_STAGE_SUMMARY_COMMAND = 'compare-third-stage-summary'
__COMPARE_QUALITY_OPTIMIZER_COMMAND = 'compare-quality-optimizer'
__COMPUTE_RISKINESS_COMMAND = 'compute-riskiness'
__COMPARE_DELAY_COMMAND = 'compare-delay'
__TYPE_ARG = 'type'
__ACTIVITY_TYPE = 'activity'
__VISITS_TYPE = 'visits'
__COMPARE_TRACE_COMMAND = 'compare-trace'
__CONTRAST_TRACE_COMMAND = 'contrast-trace'
__COST_FUNCTION_TYPE = 'cost_function'
__DEBUG_COMMAND = 'debug'
__AREA_ARG = 'area'
__FROM_ARG = 'from'
__TO_ARG = 'to'
__FILE_ARG = 'file'
__DATE_ARG = 'date'
__BASE_FILE_ARG = 'base-file'
__CANDIDATE_FILE_ARG = 'candidate-file'
__SOLUTION_FILE_ARG = 'solution'
__PROBLEM_FILE_ARG = 'problem'
__OUTPUT_PREFIX_ARG = 'output_prefix'
__OPTIONAL_ARG_PREFIX = '--'
__BASE_SCHEDULE_PATTERN = 'base_schedule_pattern'
__CANDIDATE_SCHEDULE_PATTERN = 'candidate_schedule_pattern'
__SCHEDULE_PATTERNS = 'schedule_patterns'
__LABELS = 'labels'
__OUTPUT = 'output'
__ARROWS = 'arrows'
__FILE_FORMAT_ARG = 'output_format'
__color_map = matplotlib.pyplot.get_cmap('tab20c')
FOREGROUND_COLOR = __color_map.colors[0]
FOREGROUND_COLOR2 = 'black'
def get_or_raise(obj, prop):
value = getattr(obj, prop)
if not value:
raise ValueError('{0} not set'.format(prop))
return value
def get_date_time(value):
date_time = datetime.datetime.strptime(value, '%Y-%m-%d')
return date_time
def get_date(value):
value_to_use = get_date_time(value)
return value_to_use.date()
def configure_parser():
parser = argparse.ArgumentParser(prog=sys.argv[0],
description='Robust Optimization '
'for Workforce Scheduling command line utility')
subparsers = parser.add_subparsers(dest=__COMMAND)
pull_parser = subparsers.add_parser(__PULL_COMMAND)
pull_parser.add_argument(__AREA_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FROM_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TO_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT_PREFIX_ARG)
info_parser = subparsers.add_parser(__INFO_COMMAND)
info_parser.add_argument(__FILE_ARG)
compare_distance_parser = subparsers.add_parser(__COMPARE_DISTANCE_COMMAND)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __PROBLEM_FILE_ARG, required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __SCHEDULE_PATTERNS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __LABELS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
compare_workload_parser = subparsers.add_parser(__COMPARE_WORKLOAD_COMMAND)
compare_workload_parser.add_argument(__PROBLEM_FILE_ARG)
compare_workload_parser.add_argument(__BASE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__CANDIDATE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
debug_parser = subparsers.add_parser(__DEBUG_COMMAND)
# debug_parser.add_argument(__PROBLEM_FILE_ARG)
# debug_parser.add_argument(__SOLUTION_FILE_ARG)
compare_trace_parser = subparsers.add_parser(__COMPARE_TRACE_COMMAND)
compare_trace_parser.add_argument(__PROBLEM_FILE_ARG)
compare_trace_parser.add_argument(__FILE_ARG)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __ARROWS, type=bool, default=False)
contrast_workload_parser = subparsers.add_parser(__CONTRAST_WORKLOAD_COMMAND)
contrast_workload_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_workload_parser.add_argument(__BASE_FILE_ARG)
contrast_workload_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TYPE_ARG)
compare_prediction_error_parser = subparsers.add_parser(__COMPARE_PREDICTION_ERROR_COMMAND)
compare_prediction_error_parser.add_argument(__BASE_FILE_ARG)
compare_prediction_error_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser = subparsers.add_parser(__CONTRAST_TRACE_COMMAND)
contrast_trace_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_trace_parser.add_argument(__BASE_FILE_ARG)
contrast_trace_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
show_working_hours_parser = subparsers.add_parser(__SHOW_WORKING_HOURS_COMMAND)
show_working_hours_parser.add_argument(__FILE_ARG)
show_working_hours_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_quality_parser = subparsers.add_parser(__COMPARE_QUALITY_COMMAND)
compare_quality_optimizer_parser = subparsers.add_parser(__COMPARE_QUALITY_OPTIMIZER_COMMAND)
compare_quality_optimizer_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_COST_COMMAND)
compare_benchmark_parser = subparsers.add_parser(__COMPARE_BENCHMARK_COMMAND)
compare_benchmark_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_LITERATURE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_BENCHMARK_TABLE_COMMAND)
subparsers.add_parser(__COMPUTE_RISKINESS_COMMAND)
subparsers.add_parser(__COMPARE_DELAY_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_PLOT_COMMAND)
compare_box_parser = subparsers.add_parser(__COMPARE_BOX_PLOTS_COMMAND)
compare_box_parser.add_argument(__PROBLEM_FILE_ARG)
compare_box_parser.add_argument(__BASE_FILE_ARG)
compare_box_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
third_stage_summary_parser = subparsers.add_parser(__COMPARE_THIRD_STAGE_SUMMARY_COMMAND)
third_stage_summary_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
return parser
def split_delta(delta: datetime.timedelta) -> typing.Tuple[int, int, int, int]:
days = int(delta.days)
hours = int((delta.total_seconds() - 24 * 3600 * days) // 3600)
minutes = int((delta.total_seconds() - 24 * 3600 * days - 3600 * hours) // 60)
seconds = int(delta.total_seconds() - 24 * 3600 * days - 3600 * hours - 60 * minutes)
assert hours < 24
assert minutes < 60
assert seconds < 60
return days, hours, minutes, seconds
def get_time_delta_label(total_travel_time: datetime.timedelta) -> str:
days, hours, minutes, seconds = split_delta(total_travel_time)
time = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
if days == 0:
return time
elif days == 1:
return '1 day ' + time
else:
return '{0} days '.format(days) + time
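# e.g. get_time_delta_label(datetime.timedelta(hours=2, minutes=5)) == '02:05:00'
# and get_time_delta_label(datetime.timedelta(days=1, hours=2)) == '1 day 02:00:00'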
def pull(args, settings):
area_code = get_or_raise(args, __AREA_ARG)
from_raw_date = get_or_raise(args, __FROM_ARG)
to_raw_date = get_or_raise(args, __TO_ARG)
output_prefix = get_or_raise(args, __OUTPUT_PREFIX_ARG)
console = rows.console.Console()
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
location_cache = rows.location_finder.FileSystemCache(settings)
location_finder = rows.location_finder.MultiModeLocationFinder(location_cache, user_tag_finder, timeout=5.0)
data_source = rows.sql_data_source.SqlDataSource(settings, console, location_finder)
from_date_time = get_date_time(from_raw_date)
to_date_time = get_date_time(to_raw_date)
current_date_time = from_date_time
while current_date_time <= to_date_time:
schedule = data_source.get_past_schedule(rows.model.area.Area(code=area_code), current_date_time.date())
for visit in schedule.visits:
visit.visit.address = None
output_file = '{0}_{1}.json'.format(output_prefix, current_date_time.date().strftime('%Y%m%d'))
with open(output_file, 'w') as output_stream:
json.dump(schedule, output_stream, cls=rows.model.json.JSONEncoder)
current_date_time += datetime.timedelta(days=1)
def get_travel_time(schedule, user_tag_finder):
routes = schedule.routes()
total_travel_time = datetime.timedelta()
with rows.plot.create_routing_session() as session:
for route in routes:
visit_it = iter(route.visits)
current_visit = next(visit_it, None)
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
while current_visit:
prev_location = current_location
current_visit = next(visit_it, None)
if not current_visit:
break
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
travel_time_sec = session.distance(prev_location, current_location)
if travel_time_sec:
total_travel_time += datetime.timedelta(seconds=travel_time_sec)
return total_travel_time
def info(args, settings):
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
schedule_file = get_or_raise(args, __FILE_ARG)
schedule_file_to_use = os.path.realpath(os.path.expandvars(schedule_file))
schedule = rows.load.load_schedule(schedule_file_to_use)
carers = {visit.carer for visit in schedule.visits}
print(get_travel_time(schedule, user_tag_finder), len(carers), len(schedule.visits))
def compare_distance(args, settings):
schedule_patterns = getattr(args, __SCHEDULE_PATTERNS)
labels = getattr(args, __LABELS)
output_file = getattr(args, __OUTPUT, 'distance')
output_file_format = getattr(args, __FILE_FORMAT_ARG)
data_frame_file = 'data_frame_cache.bin'
if os.path.isfile(data_frame_file):
data_frame = pandas.read_pickle(data_frame_file)
else:
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
store = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for label, schedule_pattern in zip(labels, schedule_patterns):
for schedule_path in glob.glob(schedule_pattern):
schedule = rows.load.load_schedule(schedule_path)
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(schedule)
frame = rows.plot.get_schedule_data_frame(schedule, problem, duration_estimator, distance_estimator)
visits = frame['Visits'].sum()
carers = len(frame.where(frame['Visits'] > 0))
idle_time = frame['Availability'] - frame['Travel'] - frame['Service']
idle_time[idle_time < pandas.Timedelta(0)] = pandas.Timedelta(0)
overtime = frame['Travel'] + frame['Service'] - frame['Availability']
overtime[overtime < pandas.Timedelta(0)] = pandas.Timedelta(0)
store.append({'Label': label,
'Date': schedule.metadata.begin,
'Availability': frame['Availability'].sum(),
'Travel': frame['Travel'].sum(),
'Service': frame['Service'].sum(),
'Idle': idle_time.sum(),
'Overtime': overtime.sum(),
'Carers': carers,
'Visits': visits})
data_frame = pandas.DataFrame(store)
data_frame.sort_values(by=['Date'], inplace=True)
data_frame.to_pickle(data_frame_file)
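        # the aggregated frame is cached on disk; delete data_frame_cache.bin to force a rebuild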
condensed_frame = pandas.pivot(data_frame, columns='Label', values='Travel', index='Date')
condensed_frame['Improvement'] = condensed_frame['2nd Stage'] - condensed_frame['3rd Stage']
condensed_frame['RelativeImprovement'] = condensed_frame['Improvement'] / condensed_frame['2nd Stage']
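    # RelativeImprovement expresses the 3rd-stage travel-time saving as a fraction of the 2nd-stage total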
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, ax = matplotlib.pyplot.subplots(1, 1, sharex=True)
try:
width = 0.20
dates = data_frame['Date'].unique()
time_delta_convert = rows.plot.TimeDeltaConverter()
indices = numpy.arange(1, len(dates) + 1, 1)
handles = []
position = 0
for color_number, label in enumerate(labels):
data_frame_to_use = data_frame[data_frame['Label'] == label]
handle = ax.bar(indices + position * width,
time_delta_convert(data_frame_to_use['Travel']),
width,
color=color_map.colors[color_number],
bottom=time_delta_convert.zero)
handles.append(handle)
position += 1
ax.yaxis_date()
yaxis_converter = rows.plot.CumulativeHourMinuteConverter()
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yaxis_converter))
ax.set_ylabel('Total Travel Time [hh:mm:ss]')
ax.set_yticks([time_delta_convert.zero + datetime.timedelta(seconds=seconds) for seconds in range(0, 30 * 3600, 4 * 3600 + 1)])
ax.set_xlabel('Day of October 2017')
translate_labels = {
'3rd Stage': '3rd Stage',
'Human Planners': 'Human Planners'
}
labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
rows.plot.add_legend(ax, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -0.25)) # , bbox_to_anchor=(0.5, -1.1)
figure.tight_layout()
figure.subplots_adjust(bottom=0.20)
rows.plot.save_figure(output_file, output_file_format)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
# figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(3, 1, sharex=True)
# try:
# width = 0.20
# dates = data_frame['Date'].unique()
# time_delta_convert = rows.plot.TimeDeltaConverter()
# indices = numpy.arange(1, len(dates) + 1, 1)
#
# handles = []
# position = 0
# for label in labels:
# data_frame_to_use = data_frame[data_frame['Label'] == label]
#
# handle = ax1.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Travel']),
# width,
# bottom=time_delta_convert.zero)
#
# ax2.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Idle']),
# width,
# bottom=time_delta_convert.zero)
#
# ax3.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Overtime']),
# width,
# bottom=time_delta_convert.zero)
#
# handles.append(handle)
# position += 1
#
# ax1.yaxis_date()
# ax1.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax1.set_ylabel('Travel Time')
#
# ax2.yaxis_date()
# ax2.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax2.set_ylabel('Idle Time')
#
# ax3.yaxis_date()
# ax3.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax3.set_ylabel('Total Overtime')
# ax3.set_xlabel('Day of October 2017')
#
# translate_labels = {
# '3rd Stage': 'Optimizer',
# 'Human Planners': 'Human Planners'
# }
# labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
#
# rows.plot.add_legend(ax3, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -1.1))
# figure.tight_layout()
# figure.subplots_adjust(bottom=0.20)
#
# rows.plot.save_figure(output_file, output_file_format)
# finally:
# matplotlib.pyplot.cla()
# matplotlib.pyplot.close(figure)
def calculate_forecast_visit_duration(problem):
forecast_visit_duration = rows.plot.VisitDict()
for recurring_visits in problem.visits:
for local_visit in recurring_visits.visits:
forecast_visit_duration[local_visit] = local_visit.duration
return forecast_visit_duration
def compare_workload(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
base_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __BASE_SCHEDULE_PATTERN))}
base_schedule_by_date = {schedule.metadata.begin: schedule for schedule in base_schedules}
candidate_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __CANDIDATE_SCHEDULE_PATTERN))}
candidate_schedule_by_date = {schedule.metadata.begin: schedule for schedule in candidate_schedules}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
output_file_format = getattr(args, __FILE_FORMAT_ARG)
dates = set(candidate_schedule_by_date.keys())
for date in base_schedule_by_date.keys():
dates.add(date)
dates = list(dates)
dates.sort()
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for date in dates:
base_schedule = base_schedule_by_date.get(date, None)
if not base_schedule:
logging.error('No base schedule is available for %s', date)
continue
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(base_schedule)
candidate_schedule = candidate_schedule_by_date.get(date, None)
if not candidate_schedule:
logging.error('No candidate schedule is available for %s', date)
continue
base_schedule_file = base_schedules[base_schedule]
base_schedule_data_frame = rows.plot.get_schedule_data_frame(base_schedule, problem, duration_estimator, distance_estimator)
base_schedule_stem, base_schedule_ext = os.path.splitext(os.path.basename(base_schedule_file))
rows.plot.save_workforce_histogram(base_schedule_data_frame, base_schedule_stem, output_file_format)
candidate_schedule_file = candidate_schedules[candidate_schedule]
candidate_schedule_data_frame = rows.plot.get_schedule_data_frame(candidate_schedule, problem, duration_estimator, distance_estimator)
candidate_schedule_stem, candidate_schedule_ext \
= os.path.splitext(os.path.basename(candidate_schedule_file))
rows.plot.save_workforce_histogram(candidate_schedule_data_frame,
candidate_schedule_stem,
output_file_format)
rows.plot.save_combined_histogram(candidate_schedule_data_frame,
base_schedule_data_frame,
['2nd Stage', '3rd Stage'],
'contrast_workforce_{0}_combined'.format(date),
output_file_format)
def contrast_workload(args, settings):
__WIDTH = 0.35
__FORMAT = 'svg'
plot_type = getattr(args, __TYPE_ARG, None)
if plot_type != __ACTIVITY_TYPE and plot_type != __VISITS_TYPE:
raise ValueError(
'Unknown plot type: {0}. Use either {1} or {2}.'.format(plot_type, __ACTIVITY_TYPE, __VISITS_TYPE))
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
base_schedule = rows.load.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.load.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
if base_schedule.metadata.begin != candidate_schedule.metadata.begin:
raise ValueError('Schedules begin at a different date: {0} vs {1}'
.format(base_schedule.metadata.begin, candidate_schedule.metadata.begin))
if base_schedule.metadata.end != candidate_schedule.metadata.end:
raise ValueError('Schedules end at a different date: {0} vs {1}'
.format(base_schedule.metadata.end, candidate_schedule.metadata.end))
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
date = base_schedule.metadata.begin
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
with rows.plot.create_routing_session() as routing_session:
observed_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
base_schedule_frame = rows.plot.get_schedule_data_frame(base_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
candidate_schedule_frame = rows.plot.get_schedule_data_frame(candidate_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, axis = matplotlib.pyplot.subplots()
matplotlib.pyplot.tight_layout()
try:
contrast_frame = pandas.DataFrame.merge(base_schedule_frame,
candidate_schedule_frame,
on='Carer',
how='left',
suffixes=['_Base', '_Candidate'])
contrast_frame['Visits_Candidate'] = contrast_frame['Visits_Candidate'].fillna(0)
contrast_frame['Availability_Candidate'] \
= contrast_frame['Availability_Candidate'].mask(pandas.isnull, contrast_frame['Availability_Base'])
contrast_frame['Travel_Candidate'] \
= contrast_frame['Travel_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame['Service_Candidate'] \
= contrast_frame['Service_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame = contrast_frame.sort_values(
by=['Availability_Candidate', 'Service_Candidate', 'Travel_Candidate'],
ascending=False)
if plot_type == __VISITS_TYPE:
indices = numpy.arange(len(contrast_frame.index))
base_handle = axis.bar(indices, contrast_frame['Visits_Base'], __WIDTH)
candidate_handle = axis.bar(indices + __WIDTH, contrast_frame['Visits_Candidate'], __WIDTH)
axis.legend((base_handle, candidate_handle),
('Human Planners', 'Constraint Programming'), loc='best')
output_file = problem_file_name + '_contrast_visits_' + date.isoformat() + '.' + __FORMAT
elif plot_type == __ACTIVITY_TYPE:
indices = numpy.arange(len(base_schedule_frame.index))
def plot_activity_stacked_histogram(availability, travel, service, axis, width=0.35, initial_width=0.0,
color_offset=0):
time_delta_converter = rows.plot.TimeDeltaConverter()
travel_series = numpy.array(time_delta_converter(travel))
service_series = numpy.array(time_delta_converter(service))
idle_overtime_series = list(availability - travel - service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(),
idle_overtime_series)))
service_handle = axis.bar(indices + initial_width, service_series,
width,
bottom=time_delta_converter.zero,
color=color_map.colors[0 + color_offset])
travel_handle = axis.bar(indices + initial_width,
travel_series,
width,
bottom=service_series + time_delta_converter.zero_num,
color=color_map.colors[2 + color_offset])
idle_handle = axis.bar(indices + initial_width,
idle_series,
width,
bottom=service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[4 + color_offset])
overtime_handle = axis.bar(indices + initial_width,
overtime_series,
width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[6 + color_offset])
return service_handle, travel_handle, idle_handle, overtime_handle
travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Candidate,
contrast_frame.Travel_Candidate,
contrast_frame.Service_Candidate,
axis,
__WIDTH)
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Base,
contrast_frame.Travel_Base,
contrast_frame.Service_Base,
axis,
__WIDTH,
__WIDTH,
1)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend(
(travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle,
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle),
('', '', '', '', 'Service', 'Travel', 'Idle', 'Overtime'), loc='best', ncol=2, columnspacing=0)
output_file = problem_file_name + '_contrast_activity_' + date.isoformat() + '.' + __FORMAT
bottom, top = axis.get_ylim()
axis.set_ylim(bottom, top + 0.025)
else:
raise ValueError('Unknown plot type {0}'.format(plot_type))
matplotlib.pyplot.subplots_adjust(left=0.125)
matplotlib.pyplot.savefig(output_file, format=__FORMAT, dpi=300)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def parse_time_delta(text):
if text:
time = datetime.datetime.strptime(text, '%H:%M:%S').time()
return datetime.timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
return None
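# e.g. parse_time_delta('01:30:15') == datetime.timedelta(hours=1, minutes=30, seconds=15)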
class TraceLog:
__STAGE_PATTERN = re.compile('^\w+(?P<number>\d+)(:?\-Patch)?$')
__PENALTY_PATTERN = re.compile('^MissedVisitPenalty:\s+(?P<penalty>\d+)$')
__CARER_USED_PATTERN = re.compile('^CarerUsedPenalty:\s+(?P<penalty>\d+)$')
class ProgressMessage:
def __init__(self, **kwargs):
self.__branches = kwargs.get('branches', None)
self.__cost = kwargs.get('cost', None)
self.__dropped_visits = kwargs.get('dropped_visits', None)
self.__memory_usage = kwargs.get('memory_usage', None)
self.__solutions = kwargs.get('solutions', None)
self.__wall_time = parse_time_delta(kwargs.get('wall_time', None))
@property
def cost(self):
return self.__cost
@property
def solutions(self):
return self.__solutions
@property
def dropped_visits(self):
return self.__dropped_visits
class ProblemMessage:
def __init__(self, **kwargs):
self.__carers = kwargs.get('carers', None)
self.__visits = kwargs.get('visits', None)
self.__date = kwargs.get('date', None)
if self.__date:
self.__date = datetime.datetime.strptime(self.__date, '%Y-%b-%d').date()
self.__visit_time_windows = parse_time_delta(kwargs.get('visit_time_windows', None))
self.__break_time_windows = parse_time_delta(kwargs.get('break_time_windows', None))
self.__shift_adjustment = parse_time_delta(kwargs.get('shift_adjustment', None))
self.__area = kwargs.get('area', None)
self.__missed_visit_penalty = kwargs.get('missed_visit_penalty', None)
self.__carer_used_penalty = kwargs.get('carer_used_penalty', None)
@property
def date(self):
return self.__date
@property
def carers(self):
return self.__carers
@property
def visits(self):
return self.__visits
@property
def visit_time_window(self):
return self.__visit_time_windows
@property
def carer_used_penalty(self):
return self.__carer_used_penalty
@carer_used_penalty.setter
def carer_used_penalty(self, value):
self.__carer_used_penalty = value
@property
def missed_visit_penalty(self):
return self.__missed_visit_penalty
@missed_visit_penalty.setter
def missed_visit_penalty(self, value):
self.__missed_visit_penalty = value
@property
def shift_adjustment(self):
return self.__shift_adjustment
StageSummary = collections.namedtuple('StageSummary', ['duration', 'final_cost', 'final_dropped_visits'])
def __init__(self, time_point):
self.__start = time_point
self.__events = []
self.__current_stage = None
self.__current_strategy = None
self.__problem = TraceLog.ProblemMessage()
@staticmethod
def __parse_stage_number(body):
comment = body.get('comment', None)
if comment:
match = TraceLog.__STAGE_PATTERN.match(comment)
if match:
return int(match.group('number'))
return None
def append(self, time_point, body):
if 'branches' in body:
body_to_use = TraceLog.ProgressMessage(**body)
elif 'type' in body:
if body['type'] == 'started':
self.__current_stage = self.__parse_stage_number(body)
elif body['type'] == 'finished':
self.__current_stage = None
self.__current_strategy = None
elif body['type'] == 'unknown':
if 'comment' in body:
if 'MissedVisitPenalty' in body['comment']:
match = re.match(self.__PENALTY_PATTERN, body['comment'])
assert match is not None
missed_visit_penalty = int(match.group('penalty'))
self.__problem.missed_visit_penalty = missed_visit_penalty
elif 'CarerUsedPenalty' in body['comment']:
match = re.match(self.__CARER_USED_PATTERN, body['comment'])
assert match is not None
carer_used_penalty = int(match.group('penalty'))
self.__problem.carer_used_penalty = carer_used_penalty
body_to_use = body
elif 'area' in body:
body_to_use = TraceLog.ProblemMessage(**body)
if body_to_use.missed_visit_penalty is None and self.__problem.missed_visit_penalty is not None:
body_to_use.missed_visit_penalty = self.__problem.missed_visit_penalty
if body_to_use.carer_used_penalty is None and self.__problem.carer_used_penalty is not None:
body_to_use.carer_used_penalty = self.__problem.carer_used_penalty
self.__problem = body_to_use
else:
body_to_use = body
# quick fix to prevent negative computation time if the time frame crosses midnight
if self.__start < time_point:
computation_time = time_point - self.__start
else:
computation_time = time_point + datetime.timedelta(hours=24) - self.__start
self.__events.append([computation_time, self.__current_stage, self.__current_strategy, time_point, body_to_use])
def compute_stages(self) -> typing.List[StageSummary]:
groups = dict()
for delta, stage, topic, time, message in self.__events:
if isinstance(message, TraceLog.ProgressMessage):
if stage not in groups:
groups[stage] = []
groups[stage].append([delta, topic, message])
result = []
def create_stage_summary(group):
duration = group[-1][0] - group[0][0]
cost = group[-1][2].cost
dropped_visits = group[-1][2].dropped_visits
return TraceLog.StageSummary(duration=duration, final_cost=cost, final_dropped_visits=dropped_visits)
if len(groups) == 1:
result.append(create_stage_summary(groups[None]))
else:
for stage in range(1, max(filter(lambda s: s is not None, groups)) + 1):
result.append(create_stage_summary(groups[stage]))
return result
def has_stages(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if isinstance(event, TraceLog.ProblemMessage) or isinstance(event, TraceLog.ProgressMessage):
continue
if 'type' in event and event['type'] == 'started':
return True
return False
def best_cost(self, stage: int):
best_cost, _ = self.__best_cost_and_time(stage)
return best_cost
def best_cost_time(self, stage: int):
_, best_cost_time = self.__best_cost_and_time(stage)
return best_cost_time
def last_cost(self):
last_cost, _ = self.__last_cost_and_time()
return last_cost
def last_cost_time(self):
_, last_cost_time = self.__last_cost_and_time()
return last_cost_time
def computation_time(self):
computation_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__events:
computation_time = relative_time
return computation_time
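# Scan progress events up to and including the given stage, tracking the lowest cost seen
# and the relative time at which it was reached.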
def __best_cost_and_time(self, stage: int):
best_cost = float('inf')
best_time = datetime.timedelta.max
for relative_time, event_stage, strategy, absolute_time, event in self.__filtered_events():
if event_stage > stage:
continue
if best_cost > event.cost:
best_cost = event.cost
best_time = relative_time
return best_cost, best_time
def __last_cost_and_time(self):
last_cost = float('inf')
last_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__filtered_events():
last_cost = event.cost
last_time = relative_time
return last_cost, last_time
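# Yield only progress messages from stages 2 and 3, skipping the DELAY_RISKINESS_REDUCTION strategy.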
def __filtered_events(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if stage != 2 and stage != 3:
continue
if strategy == 'DELAY_RISKINESS_REDUCTION':
continue
if not isinstance(event, TraceLog.ProgressMessage):
continue
yield relative_time, stage, strategy, absolute_time, event
@property
def strategy(self):
return self.__current_strategy
@strategy.setter
def strategy(self, value):
self.__current_strategy = value
@property
def visits(self):
return self.__problem.visits
@property
def carers(self):
return self.__problem.carers
@property
def date(self):
return self.__problem.date
@property
def visit_time_window(self):
return self.__problem.visit_time_window
@property
def carer_used_penalty(self):
return self.__problem.carer_used_penalty
@property
def missed_visit_penalty(self):
return self.__problem.missed_visit_penalty
@property
def shift_adjustment(self):
return self.__problem.shift_adjustment
@property
def events(self):
return self.__events
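# Parse a solver log file into a list of TraceLog objects. JSON bodies become problem or progress
# messages; non-JSON lines are matched against known strategy and status patterns.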
def read_traces(trace_file) -> typing.List[TraceLog]:
log_line_pattern = re.compile(r'^\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?]\s+(?P<body>.*)$')
other_line_pattern = re.compile(r'^.*?\[\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?\]\s+(?P<body>.*)$')
strategy_line_pattern = re.compile(r'^Solving the (?P<stage_name>\w+) stage using (?P<strategy_name>\w+) strategy$')
loaded_visits_pattern = re.compile(r'^Loaded past visits in \d+ seconds$')
trace_logs = []
has_preamble = False
with open(trace_file, 'r') as input_stream:
current_log = None
for line in input_stream:
match = log_line_pattern.match(line)
if not match:
match = other_line_pattern.match(line)
if match:
raw_time = match.group('time')
time = datetime.datetime.strptime(raw_time, '%H:%M:%S.%f')
try:
raw_body = match.group('body')
body = json.loads(raw_body)
if 'comment' in body and (body['comment'] == 'All'
or 'MissedVisitPenalty' in body['comment']
or 'CarerUsedPenalty' in body['comment']):
if body['comment'] == 'All':
if 'type' in body:
if body['type'] == 'finished':
has_preamble = False
current_log.strategy = None
elif body['type'] == 'started':
has_preamble = True
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
elif 'area' in body and not has_preamble:
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
except json.decoder.JSONDecodeError:
strategy_match = strategy_line_pattern.match(match.group('body'))
if strategy_match:
current_log.strategy = strategy_match.group('strategy_name')
continue
loaded_visits_match = loaded_visits_pattern.match(match.group('body'))
if loaded_visits_match:
continue
warnings.warn('Failed to parse line: ' + line)
elif 'GUIDED_LOCAL_SEARCH specified without sane timeout: solve may run forever.' in line:
continue
else:
warnings.warn('Failed to match line: ' + line)
return trace_logs
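# Flatten a list of TraceLog objects into a single data frame with one row per progress message.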
def traces_to_data_frame(trace_logs):
columns = ['relative_time', 'cost', 'dropped_visits', 'solutions', 'stage', 'stage_started', 'date', 'carers',
'visits']
has_stages = [trace.has_stages() for trace in trace_logs]
if all(has_stages) != any(has_stages):
raise ValueError('Some traces have stages while others do not')
has_stages = all(has_stages)
data = []
if has_stages:
for trace in trace_logs:
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
if not current_stage_name:
continue
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
current_stage_name, current_stage_started,
trace.date, current_carers, current_visits])
elif 'type' in event:
if 'comment' in event and event['type'] == 'unknown':
continue
if event['type'] == 'finished':
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
continue
if event['type'] == 'started':
current_stage_started = rel_time
current_stage_name = event['comment']
else:
for trace in trace_logs:
current_carers = None
current_visits = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
None, None,
trace.date, current_carers, current_visits])
return pandas.DataFrame(data=data, columns=columns)
def parse_pandas_duration(value):
raw_hours, raw_minutes, raw_seconds = value.split(':')
return datetime.timedelta(hours=int(raw_hours), minutes=int(raw_minutes), seconds=int(raw_seconds))
class DateTimeFormatter:
def __init__(self, format):
self.__format = format
def __call__(self, x, pos=None):
if x < 0:
return None
x_to_use = x
if isinstance(x, numpy.int64):
x_to_use = x.item()
delta = datetime.timedelta(seconds=x_to_use)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime(self.__format)
class AxisSettings:
def __init__(self, minutes_per_step, format_pattern, units_label, right_xlimit, xticks):
self.__minutes_per_step = minutes_per_step
self.__format_pattern = format_pattern
self.__formatter = matplotlib.ticker.FuncFormatter(DateTimeFormatter(self.__format_pattern))
self.__units_label = units_label
self.__right_xlimit = right_xlimit
self.__xticks = xticks
@property
def formatter(self):
return self.__formatter
@property
def units_label(self):
return self.__units_label
@property
def right_xlimit(self):
return self.__right_xlimit
@property
def xticks(self):
return self.__xticks
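# Pick tick spacing and time format for the horizontal axis based on the longest relative time.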
@staticmethod
def infer(max_relative_time):
if datetime.timedelta(minutes=30) < max_relative_time < datetime.timedelta(hours=1):
minutes_step = 10
format = '%H:%M'
units = '[hh:mm]'
elif datetime.timedelta(hours=1) <= max_relative_time:
minutes_step = 60
format = '%H:%M'
units = '[hh:mm]'
else:
assert max_relative_time <= datetime.timedelta(minutes=30)
minutes_step = 5
format = '%M:%S'
units = '[mm:ss]'
right_xlimit = (max_relative_time + datetime.timedelta(minutes=1)).total_seconds() // 60 * 60
xticks = numpy.arange(0, max_relative_time.total_seconds() + minutes_step * 60, minutes_step * 60)
return AxisSettings(minutes_step, format, units, right_xlimit, xticks)
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
return '{0:02d}:{1:02d}'.format(hours, minutes)
def format_time(x, pos=None):
if isinstance(x, numpy.int64):
x = x.item()
delta = datetime.timedelta(seconds=x)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime('%H:%M')
__SCATTER_POINT_SIZE = 1
__Y_AXIS_EXTENSION = 1.2
def add_trace_legend(axis, handles, bbox_to_anchor=(0.5, -0.23), ncol=3):
first_row = handles[0]
def legend_single_stage(row):
handle, multi_visits, visits, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02} {3} {4}'.format(multi_visits,
visits,
carers,
cost_function,
date_time.strftime('%d-%m'))
def legend_multi_stage(row):
handle, multi_visits, visits, multi_carers, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02}/{3:02} {4} {5}' \
.format(multi_visits, visits, multi_carers, carers, cost_function, date_time.strftime('%d-%m'))
if len(first_row) == 6:
legend_formatter = legend_single_stage
elif len(first_row) == 7:
legend_formatter = legend_multi_stage
else:
raise ValueError('Expecting row of either 6 or 7 elements')
return rows.plot.add_legend(axis,
list(map(operator.itemgetter(0), handles)),
list(map(legend_formatter, handles)),
ncol,
bbox_to_anchor)
def scatter_cost(axis, data_frame, color):
return axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']], data_frame['cost'],
s=__SCATTER_POINT_SIZE,
c=color)
def scatter_dropped_visits(axis, data_frame, color):
axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']],
data_frame['dropped_visits'],
s=__SCATTER_POINT_SIZE,
c=color)
def draw_avline(axis, point, color='lightgrey', linestyle='--'):
axis.axvline(point, color=color, linestyle=linestyle, linewidth=0.8, alpha=0.8)
def get_problem_stats(problem, date):
problem_visits = [visit for carer_visits in problem.visits
for visit in carer_visits.visits if visit.date == date]
return len(problem_visits), len([visit for visit in problem_visits if visit.carer_count > 1])
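# Plot cost and declined visits over computation time, either for a single date or for all dates in the trace file.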
def compare_trace(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
trace_file = get_or_raise(args, __FILE_ARG)
trace_file_base_name = os.path.basename(trace_file)
trace_file_stem, trace_file_ext = os.path.splitext(trace_file_base_name)
output_file_stem = getattr(args, __OUTPUT, trace_file_stem)
trace_logs = read_traces(trace_file)
data_frame = traces_to_data_frame(trace_logs)
current_date = getattr(args, __DATE_ARG, None)
dates = data_frame['date'].unique()
if current_date and current_date not in dates:
raise ValueError('Date {0} is not present in the data set'.format(current_date))
color_numbers = [0, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11, 13]
color_number_it = iter(color_numbers)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
max_relative_time = datetime.timedelta()
try:
if current_date:
current_color = color_map.colors[next(color_number_it)]
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
ax_settings = AxisSettings.infer(max_relative_time)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
handles = []
for stage in stages:
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
current_stage_data_frame = current_date_frame[current_date_frame['stage'] == stage]
draw_avline(ax1, time_delta.total_seconds())
draw_avline(ax2, time_delta.total_seconds())
total_stage_visits = current_stage_data_frame['visits'].iloc[0]
carers = current_stage_data_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_stage_data_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_stage_visits,
carers,
cost_function,
current_date])
ax2.set_xlim(left=0)
ax2.set_ylim(bottom=-10)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = ax1.scatter(
[time_delta.total_seconds() for time_delta in current_date_frame['relative_time']],
current_date_frame['cost'], s=1)
add_trace_legend(ax1, [[handle, total_multiple_carer_visits, total_problem_visits, carers, cost_function, current_date]])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
else:
handles = []
for current_date in dates:
current_color = color_map.colors[next(color_number_it)]
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
stage_linestyles = [None, 'dotted', 'dashed']
for stage, linestyle in zip(stages, stage_linestyles):
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
total_carers = current_date_frame['carers'].max()
multi_carers = current_date_frame['carers'].min()
if multi_carers == total_carers:
multi_carers = 0
total_visits = current_date_frame['visits'].max()
multi_visits = current_date_frame['visits'].min()
if multi_visits == total_visits:
multi_visits = 0
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_date_frame, current_color)
handles.append([handle,
multi_visits,
total_visits,
multi_carers,
total_carers,
cost_function,
current_date])
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_problem_visits,
carers,
cost_function,
current_date])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax_settings = AxisSettings.infer(max_relative_time)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
# if add_arrows:
# ax1.arrow(950, 200000, 40, -110000, head_width=10, head_length=20000, fc='k', ec='k')
# ax2.arrow(950, 60, 40, -40, head_width=10, head_length=10, fc='k', ec='k')
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_xlim(left=0, right=ax_settings.right_xlimit)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.set_xlim(left=0, right=ax_settings.right_xlimit)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def get_schedule_stats(data_frame):
def get_stage_stats(stage):
if stage and (isinstance(stage, str) or (isinstance(stage, float) and not numpy.isnan(stage))):
stage_frame = data_frame[data_frame['stage'] == stage]
else:
stage_frame = data_frame[data_frame['stage'].isnull()]
min_carers, max_carers = stage_frame['carers'].min(), stage_frame['carers'].max()
if min_carers != max_carers:
raise ValueError(
'Number of carers differs within stage: range [{0}, {1}]'.format(min_carers, max_carers))
min_visits, max_visits = stage_frame['visits'].min(), stage_frame['visits'].max()
if min_visits != max_visits:
raise ValueError(
'Number of visits differs within stage: range [{0}, {1}]'.format(min_visits, max_visits))
return min_carers, min_visits
stages = data_frame['stage'].unique()
if len(stages) > 1:
data = []
for stage in stages:
carers, visits = get_stage_stats(stage)
data.append([stage, carers, visits])
return data
else:
stage_to_use = None
if len(stages) == 1:
stage_to_use = stages[0]
carers, visits = get_stage_stats(stage_to_use)
return [[None, carers, visits]]
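# Plot cost and declined visits over time for a base trace and a candidate trace on the same date.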
def contrast_trace(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
output_file_stem = getattr(args, __OUTPUT, problem_file_name + '_contrast_traces')
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
candidate_trace_file = get_or_raise(args, __CANDIDATE_FILE_ARG)
base_frame = traces_to_data_frame(read_traces(base_trace_file))
candidate_frame = traces_to_data_frame(read_traces(candidate_trace_file))
current_date = get_or_raise(args, __DATE_ARG)
if current_date not in base_frame['date'].unique():
raise ValueError('Date {0} is not present in the base data set'.format(current_date))
if current_date not in candidate_frame['date'].unique():
raise ValueError('Date {0} is not present in the candidate data set'.format(current_date))
max_relative_time = datetime.timedelta()
max_relative_time = max(base_frame[base_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
max_relative_time = max(candidate_frame[candidate_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
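# Note: the computed maximum relative time is overridden below with a fixed 20-minute window for this figure.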
max_relative_time = datetime.timedelta(minutes=20)
ax_settings = AxisSettings.infer(max_relative_time)
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
def plot(data_frame, color):
stages = data_frame['stage'].unique()
if len(stages) > 1:
for stage, linestyle in zip(stages, [None, 'dotted', 'dashed']):
time_delta = data_frame[data_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), linestyle=linestyle)
scatter_dropped_visits(ax2, data_frame, color=color)
return scatter_cost(ax1, data_frame, color=color)
base_current_data_frame = base_frame[base_frame['date'] == current_date]
base_handle = plot(base_current_data_frame, color_map.colors[0])
base_stats = get_schedule_stats(base_current_data_frame)
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
candidate_handle = plot(candidate_current_data_frame, color_map.colors[1])
candidate_stats = get_schedule_stats(candidate_current_data_frame)
labels = []
for stages in [base_stats, candidate_stats]:
if len(stages) == 1:
labels.append('Direct')
elif len(stages) > 1:
labels.append('Multistage')
else:
raise ValueError()
ax1.set_ylim(bottom=0.0)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0.0, right=max_relative_time.total_seconds())
legend1 = ax1.legend([base_handle, candidate_handle], labels)
for handle in legend1.legendHandles:
handle._sizes = [25]
ax2.set_xlim(left=0.0, right=max_relative_time.total_seconds())
ax2.set_ylim(bottom=0.0)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax1.set_xticks(ax_settings.xticks)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
legend2 = ax2.legend([base_handle, candidate_handle], labels)
for handle in legend2.legendHandles:
handle._sizes = [25]
figure.tight_layout()
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
scatter_dropped_visits(ax2, candidate_current_data_frame, color=color_map.colors[1])
scatter_cost(ax1, candidate_current_data_frame, color=color_map.colors[1])
stage2_started = \
candidate_current_data_frame[candidate_current_data_frame['stage'] == 'Stage2']['stage_started'].iloc[0]
ax1.set_ylim(bottom=0, top=6 * 10 ** 4)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0, right=12)
ax2.set_xlim(left=0, right=12)
x_ticks_positions = range(0, 12 + 1, 2)
# matplotlib.pyplot.locator_params(axis='x', nbins=6)
ax2.set_ylim(bottom=-10.0, top=120)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(x_ticks_positions)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
# rows.plot.save_figure(output_file_stem + '_first_stage_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
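# Box plots of stage durations, final costs and declined visits across all traces in the base log.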
def compare_box_plots(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
output_file_stem = getattr(args, __OUTPUT, problem_file_name)
traces = read_traces(base_trace_file)
figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(1, 3)
stages = [trace.compute_stages() for trace in traces]
num_stages = max(len(s) for s in stages)
durations = [[local_stage[num_stage].duration.total_seconds() for local_stage in stages] for num_stage in range(num_stages)]
max_duration = max(max(stage_durations) for stage_durations in durations)
axis_settings = AxisSettings.infer(datetime.timedelta(seconds=max_duration))
try:
ax1.boxplot(durations, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax1.set_yticks(axis_settings.xticks)
ax1.yaxis.set_major_formatter(axis_settings.formatter)
ax1.set_xlabel('Stage')
ax1.set_ylabel('Duration [hh:mm]')
costs = [[local_stage[num_stage].final_cost for local_stage in stages] for num_stage in range(num_stages)]
ax2.boxplot(costs, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
formatter = matplotlib.ticker.ScalarFormatter()
formatter.set_scientific(True)
formatter.set_powerlimits((-3, 3))
ax2.yaxis.set_major_formatter(formatter)
ax2.set_xlabel('Stage')
ax2.set_ylabel('Cost')
declined_visits = [[local_stage[num_stage].final_dropped_visits for local_stage in stages] for num_stage in range(num_stages)]
ax3.boxplot(declined_visits, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
max_declined_visits = max(max(stage_declined_visits) for stage_declined_visits in declined_visits)
ax3.set_xlabel('Stage')
ax3.set_ylabel('Declined Visits')
dropped_visit_ticks = None
if max_declined_visits < 100:
dropped_visit_ticks = range(0, max_declined_visits + 1)
else:
dropped_visit_ticks = range(0, max_declined_visits + 100, 100)
ax3.set_yticks(dropped_visit_ticks)
figure.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
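# Plot the relative error between observed visit durations in the base schedule and expected durations in the candidate schedule.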
def compare_prediction_error(args, settings):
base_schedule = rows.plot.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.plot.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
observed_duration_by_visit = rows.plot.calculate_observed_visit_duration(base_schedule)
expected_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
data = []
for visit in base_schedule.visits:
observed_duration = observed_duration_by_visit[visit.visit]
expected_duration = expected_duration_by_visit[visit.visit]
data.append([visit.key, observed_duration.total_seconds(), expected_duration.total_seconds()])
frame = pandas.DataFrame(columns=['Visit', 'ObservedDuration', 'ExpectedDuration'], data=data)
frame['Error'] = (frame.ObservedDuration - frame.ExpectedDuration) / frame.ObservedDuration
figure, axis = matplotlib.pyplot.subplots()
try:
axis.plot(frame['Error'], label='(Observed - Expected)/Observed')
axis.legend()
axis.set_ylim(-20, 2)
axis.grid()
matplotlib.pyplot.show()
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
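# Remove visits that violate the problem constraints in two passes: first drop visits whose check-in
# exceeds the allowed time window, then walk each route with estimated durations and travel times,
# dropping visits that would run past the maximum shift end and trimming routes whose slack cannot
# cover contractual breaks.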
def remove_violated_visits(rough_schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
duration_estimator: rows.plot.DurationEstimator,
distance_estimator: rows.plot.DistanceEstimator) -> rows.model.schedule.Schedule:
max_delay = metadata.visit_time_window
min_delay = -metadata.visit_time_window
dropped_visits = 0
allowed_visits = []
for route in rough_schedule.routes:
carer_diary = problem.get_diary(route.carer, metadata.date)
if not carer_diary:
continue
for visit in route.visits:
if visit.check_in is not None:
check_in_delay = visit.check_in - datetime.datetime.combine(metadata.date, visit.time)
if check_in_delay > max_delay: # or check_in_delay < min_delay:
dropped_visits += 1
continue
allowed_visits.append(visit)
# schedule does not have visits which exceed time windows
first_improved_schedule = rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
allowed_visits = []
for route in first_improved_schedule.routes:
if not route.visits:
continue
diary = problem.get_diary(route.carer, metadata.date)
assert diary is not None
# shift adjustment is added twice because it is allowed to extend the time before and after the working hours
max_shift_end = max(event.end for event in diary.events) + metadata.shift_adjustment + metadata.shift_adjustment
first_visit = route.visits[0]
current_time = datetime.datetime.combine(metadata.date, first_visit.time)
if current_time <= max_shift_end:
allowed_visits.append(first_visit)
visits_made = []
total_slack = datetime.timedelta()
if len(route.visits) == 1:
visit = route.visits[0]
visit_duration = duration_estimator(visit.visit)
if visit_duration is None:
visit_duration = visit.duration
current_time += visit_duration
if current_time <= max_shift_end:
visits_made.append(visit)
else:
dropped_visits += 1
else:
for prev_visit, next_visit in route.edges():
visit_duration = duration_estimator(prev_visit.visit)
if visit_duration is None:
visit_duration = prev_visit.duration
current_time += visit_duration
current_time += distance_estimator(prev_visit, next_visit)
start_time = max(current_time, datetime.datetime.combine(metadata.date, next_visit.time) - max_delay)
total_slack += start_time - current_time
current_time = start_time
if current_time <= max_shift_end:
visits_made.append(next_visit)
else:
dropped_visits += 1
if current_time <= max_shift_end:
total_slack += max_shift_end - current_time
total_break_duration = datetime.timedelta()
for carer_break in diary.breaks:
total_break_duration += carer_break.duration
if total_slack + datetime.timedelta(hours=2) < total_break_duration:
# route is not respecting contractual breaks
visits_made.pop()
for visit in visits_made:
allowed_visits.append(visit)
# schedule does not contain visits which exceed overtime of the carer
return rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
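# Aggregates the cost components of a schedule: travel time, carers used, missed visits and the missed-visit penalty.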
class ScheduleCost:
CARER_COST = datetime.timedelta(seconds=60 * 60 * 4)
def __init__(self, travel_time: datetime.timedelta, carers_used: int, visits_missed: int, missed_visit_penalty: int):
self.__travel_time = travel_time
self.__carers_used = carers_used
self.__visits_missed = visits_missed
self.__missed_visit_penalty = missed_visit_penalty
@property
def travel_time(self) -> datetime.timedelta:
return self.__travel_time
@property
def visits_missed(self) -> int:
return self.__visits_missed
@property
def missed_visit_penalty(self) -> int:
return self.__missed_visit_penalty
@property
def carers_used(self) -> int:
return self.__carers_used
def total_cost(self, include_vehicle_cost: bool) -> float:
cost = self.__travel_time.total_seconds() + self.__missed_visit_penalty * self.__visits_missed
if include_vehicle_cost:
cost += self.CARER_COST.total_seconds() * self.__carers_used
return cost
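# Compute the cost of a schedule: total travel time over route edges, number of carers used, and
# the number of requested visits left unscheduled, together with the missed-visit penalty.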
def get_schedule_cost(schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
distance_estimator: rows.plot.DistanceEstimator) -> ScheduleCost:
carer_used_ids = set()
visit_made_ids = set()
travel_time = datetime.timedelta()
for route in schedule.routes:
if not route.visits:
continue
carer_used_ids.add(route.carer.sap_number)
for visit in route.visits:
visit_made_ids.add(visit.visit.key)
for source, destination in route.edges():
travel_time += distance_estimator(source, destination)
available_visit_ids = {visit.key for visit in problem.requested_visits(schedule.date)}
return ScheduleCost(travel_time, len(carer_used_ids), len(available_visit_ids.difference(visit_made_ids)), metadata.missed_visit_penalty)
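# Paths below are hard-coded to a specific simulation directory and log file names.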
def compare_schedule_cost(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig',
['ProblemPath', 'HumanSolutionPath', 'SolverSecondSolutionPath', 'SolverThirdSolutionPath'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/second_stage_c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
include_vehicle_cost = False
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
def normalize_cost(value) -> float:
if isinstance(value, datetime.timedelta):
value_to_use = value.total_seconds()
elif isinstance(value, float) or isinstance(value, int):
value_to_use = value
else:
return float('inf')
return round(value_to_use / 3600, 2)
for solver_trace, problem_config in zip(solver_traces, problem_data):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
solver_second_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSecondSolutionPath))
solver_third_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverThirdSolutionPath))
assert solver_second_schedule.date == human_schedule.date
assert solver_third_schedule.date == human_schedule.date
available_carers = problem.available_carers(human_schedule.date)
requested_visits = problem.requested_visits(human_schedule.date)
one_carer_visits = [visit for visit in requested_visits if visit.carer_count == 1]
two_carer_visits = [visit for visit in requested_visits if visit.carer_count == 2]
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_third_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_second_schedule_to_use = remove_violated_visits(solver_second_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
solver_third_schedule_to_use = remove_violated_visits(solver_third_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
human_cost = get_schedule_cost(human_schedule_to_use, solver_trace, problem, distance_estimator)
solver_second_cost = get_schedule_cost(solver_second_schedule_to_use, solver_trace, problem, distance_estimator)
solver_third_cost = get_schedule_cost(solver_third_schedule_to_use, solver_trace, problem, distance_estimator)
results.append(collections.OrderedDict(date=solver_trace.date,
day=solver_trace.date.day,
carers=len(available_carers),
one_carer_visits=len(one_carer_visits),
two_carer_visits=2 * len(two_carer_visits),
missed_visit_penalty=normalize_cost(solver_trace.missed_visit_penalty),
carer_used_penalty=normalize_cost(solver_trace.carer_used_penalty),
planner_missed_visits=human_cost.visits_missed,
solver_second_missed_visits=solver_second_cost.visits_missed,
solver_third_missed_visits=solver_third_cost.visits_missed,
planner_travel_time=normalize_cost(human_cost.travel_time),
solver_second_travel_time=normalize_cost(solver_second_cost.travel_time),
solver_third_travel_time=normalize_cost(solver_third_cost.travel_time),
planner_carers_used=human_cost.carers_used,
solver_second_carers_used=solver_second_cost.carers_used,
solver_third_carers_used=solver_third_cost.carers_used,
planner_total_cost=normalize_cost(human_cost.total_cost(include_vehicle_cost)),
solver_second_total_cost=normalize_cost(solver_second_cost.total_cost(include_vehicle_cost)),
solver_third_total_cost=normalize_cost(solver_third_cost.total_cost(include_vehicle_cost)),
solver_second_time=int(math.ceil(solver_trace.best_cost_time(2).total_seconds())),
solver_third_time=int(math.ceil(solver_trace.best_cost_time(3).total_seconds()))))
data_frame = pandas.DataFrame(data=results)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'carers', 'one_carer_visits', 'two_carer_visits', 'missed_visit_penalty',
'planner_total_cost', 'solver_second_total_cost', 'solver_third_total_cost',
'planner_missed_visits', 'solver_second_missed_visits', 'solver_third_missed_visits',
'planner_travel_time', 'solver_second_travel_time', 'solver_third_travel_time', 'solver_second_time',
'solver_third_time']],
tablefmt='latex', headers='keys', showindex=False))
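# For each client, deduplicate visits, skip those with a midnight start time, and sum the gaps
# between consecutive visit start times.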
def get_consecutive_visit_time_span(schedule: rows.model.schedule.Schedule, start_time_estimator):
client_visits = collections.defaultdict(list)
for visit in schedule.visits:
client_visits[visit.visit.service_user].append(visit)
for client in client_visits:
visits = client_visits[client]
used_keys = set()
unique_visits = []
for visit in visits:
date_time = start_time_estimator(visit)
if date_time.hour == 0 and date_time.minute == 0:
continue
if visit.visit.key not in used_keys:
used_keys.add(visit.visit.key)
unique_visits.append(visit)
unique_visits.sort(key=start_time_estimator)
client_visits[client] = unique_visits
client_span = collections.defaultdict(datetime.timedelta)
for client in client_visits:
if len(client_visits[client]) < 2:
continue
last_visit = client_visits[client][0]
total_span = datetime.timedelta()
for next_visit in client_visits[client][1:]:
total_span += start_time_estimator(next_visit) - start_time_estimator(last_visit)
last_visit = next_visit
client_span[client] = total_span
return client_span
def get_carer_client_frequency(schedule: rows.model.schedule.Schedule):
client_assigned_carers = collections.defaultdict(collections.Counter)
for visit in schedule.visits:
client_assigned_carers[int(visit.visit.service_user)][int(visit.carer.sap_number)] += 1
return client_assigned_carers
def get_visits(problem: rows.model.problem.Problem, date: datetime.date):
visits = set()
for local_visits in problem.visits:
for visit in local_visits.visits:
if date != visit.date:
continue
visit.service_user = local_visits.service_user
visits.add(visit)
return visits
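# Extract the sets of carers (teams) that jointly served visits requiring more than one carer.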
def get_teams(problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
multiple_carer_visit_keys = set()
for visit in get_visits(problem, schedule.date):
if visit.carer_count > 1:
multiple_carer_visit_keys.add(visit.key)
client_visit_carers = collections.defaultdict(lambda: collections.defaultdict(list))
for visit in schedule.visits:
if visit.visit.key not in multiple_carer_visit_keys:
continue
client_visit_carers[visit.visit.service_user][visit.visit.key].append(int(visit.carer.sap_number))
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
client_visit_carers[client][visit_key].sort()
teams = set()
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
teams.add(tuple(client_visit_carers[client][visit_key]))
return teams
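# Compare a human planner schedule against a solver schedule on quality metrics: overtime,
# visits per carer, carer continuity, consecutive visit spans and team composition.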
def compare_schedule_quality(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'HumanSolutionPath', 'SolverSolutionPath'])
def compare_quality(solver_trace, problem, human_schedule, solver_schedule, duration_estimator, distance_estimator):
visits = get_visits(problem, solver_trace.date)
multiple_carer_visit_keys = {visit.key for visit in visits if visit.carer_count > 1}
clients = list({int(visit.service_user) for visit in visits})
# number of different carers assigned throughout the day
human_carer_frequency = get_carer_client_frequency(human_schedule)
solver_carer_frequency = get_carer_client_frequency(solver_schedule)
def median_carer_frequency(client_counters):
total_counters = []
for client in client_counters:
# total_counters += len(client_counters[client])
total_counters.append(len(client_counters[client]))
# return total_counters / len(client_counters)
return numpy.median(total_counters)
human_schedule_squared = []
solver_schedule_squared = []
for client in clients:
if client in human_carer_frequency:
human_schedule_squared.append(sum(human_carer_frequency[client][carer] ** 2 for carer in human_carer_frequency[client]))
else:
human_schedule_squared.append(0)
if client in solver_carer_frequency:
solver_schedule_squared.append(sum(solver_carer_frequency[client][carer] ** 2 for carer in solver_carer_frequency[client]))
else:
solver_schedule_squared.append(0)
human_matching_dominates = 0
solver_matching_dominates = 0
for index in range(len(clients)):
if human_schedule_squared[index] > solver_schedule_squared[index]:
human_matching_dominates += 1
elif human_schedule_squared[index] < solver_schedule_squared[index]:
solver_matching_dominates += 1
matching_no_diff = len(clients) - human_matching_dominates - solver_matching_dominates
assert matching_no_diff >= 0
human_schedule_span = get_consecutive_visit_time_span(human_schedule, lambda visit: visit.check_in)
solver_schedule_span = get_consecutive_visit_time_span(solver_schedule, lambda visit: datetime.datetime.combine(visit.date, visit.time))
human_span_dominates = 0
solver_span_dominates = 0
for client in clients:
if human_schedule_span[client] > solver_schedule_span[client]:
human_span_dominates += 1
elif human_schedule_span[client] < solver_schedule_span[client]:
solver_span_dominates += 1
span_no_diff = len(clients) - human_span_dominates - solver_span_dominates
assert span_no_diff >= 0
human_teams = get_teams(problem, human_schedule)
solver_teams = get_teams(problem, solver_schedule)
human_schedule_frame = rows.plot.get_schedule_data_frame(human_schedule, problem, duration_estimator, distance_estimator)
solver_schedule_frame = rows.plot.get_schedule_data_frame(solver_schedule, problem, duration_estimator, distance_estimator)
human_visits = human_schedule_frame['Visits'].median()
solver_visits = solver_schedule_frame['Visits'].median()
human_total_overtime = compute_overtime(human_schedule_frame).sum()
solver_total_overtime = compute_overtime(solver_schedule_frame).sum()
return {'problem': str(human_schedule.date),
'visits': len(visits),
'clients': len(clients),
'human_overtime': human_total_overtime,
'solver_overtime': solver_total_overtime,
'human_visits_median': human_visits,
'solver_visits_median': solver_visits,
'human_visit_span_dominates': human_span_dominates,
'solver_visit_span_dominates': solver_span_dominates,
'visit_span_indifferent': span_no_diff,
'human_matching_dominates': human_matching_dominates,
'solver_matching_dominates': solver_matching_dominates,
'human_carer_frequency': median_carer_frequency(human_carer_frequency),
'solver_carer_frequency': median_carer_frequency(solver_carer_frequency),
'matching_indifferent': matching_no_diff,
'human_teams': len(human_teams),
'solver_teams': len(solver_teams)}
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for solver_trace, problem_config in zip(solver_traces, problem_data):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
solver_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSolutionPath))
assert solver_trace.date == human_schedule.date
assert solver_trace.date == solver_schedule.date
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_schedule_to_use = remove_violated_visits(solver_schedule, solver_trace, problem, duration_estimator, distance_estimator)
row = compare_quality(solver_trace, problem, human_schedule_to_use, solver_schedule_to_use, duration_estimator, distance_estimator)
results.append(row)
data_frame = pandas.DataFrame(data=results)
data_frame['human_visit_span_dominates_rel'] = data_frame['human_visit_span_dominates'] / data_frame['clients']
data_frame['human_visit_span_dominates_rel_label'] = data_frame['human_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_visit_span_dominates_rel'] = data_frame['solver_visit_span_dominates'] / data_frame['clients']
data_frame['solver_visit_span_dominates_rel_label'] = data_frame['solver_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['visit_span_indifferent_rel'] = data_frame['visit_span_indifferent'] / data_frame['clients']
data_frame['human_matching_dominates_rel'] = data_frame['human_matching_dominates'] / data_frame['clients']
data_frame['human_matching_dominates_rel_label'] = data_frame['human_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_matching_dominates_rel'] = data_frame['solver_matching_dominates'] / data_frame['clients']
data_frame['solver_matching_dominates_rel_label'] = data_frame['solver_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['matching_indifferent_rel'] = data_frame['matching_indifferent'] / data_frame['clients']
data_frame['day'] = data_frame['problem'].apply(lambda label: datetime.datetime.strptime(label, '%Y-%m-%d').date().day)
data_frame['human_overtime_label'] = data_frame['human_overtime'].apply(get_time_delta_label)
data_frame['solver_overtime_label'] = data_frame['solver_overtime'].apply(get_time_delta_label)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'human_visits_median', 'solver_visits_median', 'human_overtime_label', 'solver_overtime_label',
'human_carer_frequency', 'solver_carer_frequency',
'human_matching_dominates_rel_label', 'solver_matching_dominates_rel_label',
'human_teams', 'solver_teams']], tablefmt='latex', showindex=False, headers='keys'))
BenchmarkData = collections.namedtuple('BenchmarkData', ['BestCost', 'BestCostTime', 'BestBound', 'ComputationTime'])
class MipTrace:
__MIP_HEADER_PATTERN = re.compile(r'^\s*Expl\s+Unexpl\s+|\s+Obj\s+Depth\s+IntInf\s+|\s+Incumbent\s+BestBd\s+Gap\s+|\s+It/Node\s+Time\s*$')
__MIP_LINE_PATTERN = re.compile(r'^(?P<solution_flag>[\w\*]?)\s*'
r'(?P<explored_nodes>\d+)\s+'
r'(?P<nodes_to_explore>\d+)\s+'
r'(?P<node_relaxation>[\w\.]*)\s+'
r'(?P<node_depth>\d*)\s+'
r'(?P<fractional_variables>\w*)\s+'
r'(?P<incumbent>[\d\.\-]*)\s+'
r'(?P<lower_bound>[\d\.\-]*)\s+'
r'(?P<gap>[\d\.\%\-]*)\s+'
r'(?P<simplex_it_per_node>[\d\.\-]*)\s+'
r'(?P<elapsed_time>\d+)s$')
__SUMMARY_PATTERN = re.compile(r'^Best\sobjective\s(?P<objective>[e\d\.\+]+),\s'
r'best\sbound\s(?P<bound>[e\d\.\+]+),\s'
r'gap\s(?P<gap>[e\d\.\+]+)\%$')
class MipProgressMessage:
def __init__(self, has_solution, best_cost, lower_bound, elapsed_time):
self.__has_solution = has_solution
self.__best_cost = best_cost
self.__lower_bound = lower_bound
self.__elapsed_time = elapsed_time
@property
def has_solution(self):
return self.__has_solution
@property
def best_cost(self):
return self.__best_cost
@property
def lower_bound(self):
return self.__lower_bound
@property
def elapsed_time(self):
return self.__elapsed_time
def __init__(self, best_objective: float, best_bound: float, events: typing.List[MipProgressMessage]):
self.__best_objective = best_objective
self.__best_bound = best_bound
self.__events = events
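# Parse a MIP solver log (Gurobi-style output): skip to the progress table header, read the
# progress rows, then pick up the best objective and bound from the summary line.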
@staticmethod
def read_from_file(path) -> 'MipTrace':
events = []
best_objective = float('inf')
best_bound = float('-inf')
with open(path, 'r') as fp:
lines = fp.readlines()
lines_it = iter(lines)
for line in lines_it:
if re.match(MipTrace.__MIP_HEADER_PATTERN, line):
break
next(lines_it, None) # read the empty line
for line in lines_it:
line_match = re.match(MipTrace.__MIP_LINE_PATTERN, line)
if not line_match:
break
raw_solution_flag = line_match.group('solution_flag')
raw_incumbent = line_match.group('incumbent')
raw_lower_bound = line_match.group('lower_bound')
raw_elapsed_time = line_match.group('elapsed_time')
has_solution = raw_solution_flag == 'H' or raw_solution_flag == '*'
incumbent = float(raw_incumbent) if raw_incumbent and raw_incumbent != '-' else float('inf')
lower_bound = float(raw_lower_bound) if raw_lower_bound else float('-inf')
elapsed_time = datetime.timedelta(seconds=int(raw_elapsed_time)) if raw_elapsed_time else datetime.timedelta()
events.append(MipTrace.MipProgressMessage(has_solution, incumbent, lower_bound, elapsed_time))
next(lines_it, None)
for line in lines_it:
line_match = re.match(MipTrace.__SUMMARY_PATTERN, line)
if line_match:
raw_objective = line_match.group('objective')
if raw_objective:
best_objective = float(raw_objective)
raw_bound = line_match.group('bound')
if raw_bound:
best_bound = float(raw_bound)
return MipTrace(best_objective, best_bound, events)
def best_cost(self):
return self.__best_objective
def best_cost_time(self):
for event in reversed(self.__events):
if event.has_solution:
return event.elapsed_time
return datetime.timedelta.max
def best_bound(self):
return self.__best_bound
def computation_time(self):
if self.__events:
return self.__events[-1].elapsed_time
return datetime.timedelta.max
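# Fallback used when a solution log is missing or empty; reports sentinel values instead of real results.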
class DummyTrace:
def __init__(self):
pass
def best_cost(self):
return float('inf')
def best_bound(self):
return 0
def best_cost_time(self):
return datetime.timedelta(hours=23, minutes=59, seconds=59)
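# Build the benchmark comparison tables: for each problem instance, pair the MIP log with the CP
# team and CP window logs and report costs, optimality gaps and times to the best solution.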
def compare_benchmark_table(args, settings):
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'Carers', 'Visits', 'Visits2', 'MipSolutionLog',
'CpTeamSolutionLog',
'CpWindowsSolutionLog'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
old_simulation_dir = '/home/pmateusz/dev/cordia/simulations/review_simulations_old'
dummy_log = DummyTrace()
problem_configs = [ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m0c3.json'.format(day_number)),
3, 25, 0,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)]
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m5c3.json'.format(day_number)),
3, 20, 5,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m5c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_teams_v25m5c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_windows_v25m5c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m0c5.json'.format(day_number)),
5, 50, 0,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m10c5.json'.format(day_number)),
5, 40, 10,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m10c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_teams_v50m10c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_windows_v50m10c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
logs = []
for problem_config in problem_configs:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if os.path.exists(problem_config.CpTeamSolutionLog):
cp_team_logs = read_traces(problem_config.CpTeamSolutionLog)
if not cp_team_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpTeamSolutionLog))
cp_team_log = dummy_log
else:
cp_team_log = cp_team_logs[0]
else:
cp_team_log = dummy_log
if os.path.exists(problem_config.CpWindowsSolutionLog):
cp_window_logs = read_traces(problem_config.CpWindowsSolutionLog)
if not cp_window_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpWindowsSolutionLog))
cp_window_log = dummy_log
else:
cp_window_log = cp_window_logs[0]
else:
cp_window_log = dummy_log
if os.path.exists(problem_config.MipSolutionLog):
mip_log = MipTrace.read_from_file(problem_config.MipSolutionLog)
if not mip_log:
warnings.warn('File {0} is empty'.format(problem_config.MipSolutionLog))
mip_log = dummy_log
else:
mip_log = dummy_log
logs.append([problem_config, mip_log, cp_team_log, cp_window_log])
def get_gap(cost: float, lower_bound: float) -> float:
if lower_bound == 0.0:
return float('inf')
return (cost - lower_bound) * 100.0 / lower_bound
def get_delta(cost, cost_to_compare):
return (cost - cost_to_compare) * 100.0 / cost_to_compare
def get_computation_time_label(time: datetime.timedelta) -> str:
return str(time.total_seconds())
data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
data.append(collections.OrderedDict(
date=cp_team_log.date,
visits=problem_config.Visits,
visits_of_two=problem_config.Visits2,
carers=cp_team_log.carers,
penalty=cp_team_log.missed_visit_penalty,
lower_bound=mip_log.best_bound(),
mip_best_cost=mip_log.best_cost(),
mip_best_gap=get_gap(mip_log.best_cost(), mip_log.best_bound()),
mip_best_time=get_computation_time_label(mip_log.best_cost_time()),
team_best_cost=cp_team_log.best_cost(),
team_best_gap=get_gap(cp_team_log.best_cost(), mip_log.best_bound()),
team_best_delta=get_gap(cp_team_log.best_cost(), mip_log.best_cost()),
team_best_time=get_computation_time_label(cp_team_log.best_cost_time()),
windows_best_cost=cp_window_log.best_cost(),
windows_best_gap=get_gap(cp_window_log.best_cost(), mip_log.best_bound()),
windows_best_delta=get_gap(cp_window_log.best_cost(), mip_log.best_cost()),
windows_best_time=get_computation_time_label(cp_window_log.best_cost_time())))
data_frame = pandas.DataFrame(data=data)
def get_duration_label(time_delta: datetime.timedelta) -> str:
assert time_delta.days == 0
hours = int(time_delta.total_seconds() / 3600)
minutes = int(time_delta.total_seconds() / 60 - hours * 60)
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
# return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
return '{0:,.0f}'.format(time_delta.total_seconds())
def get_cost_label(cost: float) -> str:
return '{0:,.0f}'.format(cost)
def get_gap_label(gap: float) -> str:
return '{0:,.2f}'.format(gap)
def get_problem_label(problem, date: datetime.date):
label = '{0:2d} {1}'.format(date.day, problem.Visits)
if problem.Visits2 == 0:
return label
return label + '/' + str(problem.Visits2)
print_data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
best_cost = min([mip_log.best_cost(), cp_team_log.best_cost(), cp_window_log.best_cost()])
print_data.append(collections.OrderedDict(Problem=get_problem_label(problem_config, cp_team_log.date),
Penalty=get_cost_label(cp_team_log.missed_visit_penalty),
LB=get_cost_label(mip_log.best_bound()),
MIP_COST=get_cost_label(mip_log.best_cost()),
MIP_GAP=get_gap_label(get_gap(mip_log.best_cost(), mip_log.best_bound())),
MIP_DELTA=get_gap_label(get_delta(mip_log.best_cost(), best_cost)),
MIP_TIME=get_duration_label(mip_log.best_cost_time()),
TEAMS_GAP=get_gap_label(get_gap(cp_team_log.best_cost(), mip_log.best_bound())),
TEAMS_DELTA=get_gap_label(get_delta(cp_team_log.best_cost(), best_cost)),
TEAMS_COST=get_cost_label(cp_team_log.best_cost()),
TEAMS_Time=get_duration_label(cp_team_log.best_cost_time()),
WINDOWS_COST=get_cost_label(cp_window_log.best_cost()),
WINDOWS_GAP=get_gap_label(get_gap(cp_window_log.best_cost(), mip_log.best_bound())),
WINDOWS_DELTA=get_gap_label(get_delta(cp_window_log.best_cost(), best_cost)),
WINDOWS_TIME=get_duration_label(cp_window_log.best_cost_time())
))
data_frame = pandas.DataFrame(data=print_data)
print(tabulate.tabulate(
data_frame[['Problem', 'Penalty', 'LB', 'MIP_COST', 'MIP_TIME', 'TEAMS_COST', 'TEAMS_Time', 'WINDOWS_COST', 'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
print(tabulate.tabulate(
data_frame[['Problem', 'MIP_GAP', 'MIP_DELTA', 'MIP_TIME', 'TEAMS_GAP', 'TEAMS_DELTA', 'TEAMS_Time', 'WINDOWS_GAP', 'WINDOWS_DELTA',
'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
@functools.total_ordering
class ProblemMetadata:
WINDOW_LABELS = ['', 'F', 'S', 'M', 'L', 'A']
def __init__(self, case: int, visits: int, windows: int):
assert visits == 20 or visits == 50 or visits == 80
assert 0 <= windows < len(ProblemMetadata.WINDOW_LABELS)
self.__case = case
self.__visits = visits
self.__windows = windows
def __eq__(self, other) -> bool:
if isinstance(other, ProblemMetadata):
return self.case == other.case and self.visits == other.visits and self.__windows == other.windows
return False
def __neq__(self, other) -> bool:
return not (self == other)
def __lt__(self, other) -> bool:
assert isinstance(other, ProblemMetadata)
if self.windows != other.windows:
return self.windows < other.windows
if self.visits != other.visits:
return self.visits < other.visits
if self.case != other.case:
return self.case < other.case
return False
@property
def label(self) -> str:
return '{0:>2}{1}'.format(self.instance_number, self.windows_label)
@property
def windows(self) -> int:
return self.__windows
@property
def windows_label(self) -> str:
return ProblemMetadata.WINDOW_LABELS[self.__windows]
@property
def visits(self) -> int:
return self.__visits
@property
def case(self) -> int:
return self.__case
@property
def instance_number(self) -> int:
if self.__visits == 20:
return self.__case
if self.__visits == 50:
return 5 + self.__case
return 8 + self.__case
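# Illustrative note: instances are ordered by window configuration, then visit count, then case
# number. For example, ProblemMetadata(case=1, visits=20, windows=1).label evaluates to ' 1F' and
# ProblemMetadata(case=2, visits=50, windows=3).label to ' 7M' (values chosen for illustration only).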
def compare_literature_table(args, settings):
LIU2019 = 'liu2019'
AFIFI2016 = 'afifi2016'
DECERLE2018 = 'decerle2018'
GAYRAUD2015 = 'gayraud2015'
PARRAGH2018 = 'parragh2018'
BREDSTROM2008 = 'bredstrom2008combined'
BREDSTROM2007 = 'bredstrom2007branchandprice'
InstanceConfig = collections.namedtuple('InstanceConfig', ['name', 'nickname', 'result', 'who', 'is_optimal'])
instance_data = [
InstanceConfig(name='case_1_20_4_2_1', nickname='1N', result=5.13, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_1', nickname='2N', result=4.98, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_1', nickname='3N', result=5.19, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_1', nickname='4N', result=7.21, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_1', nickname='5N', result=5.37, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_1', nickname='6N', result=14.45, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_1', nickname='7N', result=13.02, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_1', nickname='8N', result=34.94, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_1', nickname='9N', result=43.48, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_2_80_16_8_1', nickname='10N', result=12.08, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_20_4_2_2', nickname='1S', result=3.55, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_2', nickname='2S', result=4.27, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_2', nickname='3S', result=3.63, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_2', nickname='4S', result=6.14, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_2', nickname='5S', result=3.93, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_2', nickname='6S', result=8.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_2', nickname='7S', result=8.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_2', nickname='8S', result=9.54, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_2', nickname='9S', result=11.93, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_2', nickname='10S', result=8.54, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_3', nickname='1M', result=3.55, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_3', nickname='2M', result=3.58, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_3', nickname='3M', result=3.33, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_3', nickname='4M', result=5.67, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_3', nickname='5M', result=3.53, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_3', nickname='6M', result=7.7, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_3', nickname='7M', result=7.48, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_3', nickname='8M', result=8.54, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_3', nickname='9M', result=10.92, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_3', nickname='10M', result=7.62, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_4', nickname='1L', result=3.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_4', nickname='2L', result=3.42, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_4', nickname='3L', result=3.29, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_4', nickname='4L', result=5.13, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_4', nickname='5L', result=3.34, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_4', nickname='6L', result=7.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_4', nickname='7L', result=6.88, who=BREDSTROM2007, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_4', nickname='8L', result=8, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_4', nickname='9L', result=10.43, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_4', nickname='10L', result=7.36, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_5', nickname='1H', result=2.95, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_20_4_2_5', nickname='2H', result=2.88, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_20_4_2_5', nickname='3H', result=2.74, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_4_20_4_2_5', nickname='4H', result=4.29, who=GAYRAUD2015, is_optimal=False),
InstanceConfig(name='case_5_20_4_2_5', nickname='5H', result=2.81, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_50_10_5_5', nickname='6H', result=6.48, who=DECERLE2018, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_5', nickname='7H', result=5.71, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_5', nickname='8H', result=6.52, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_5', nickname='9H', result=8.51, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_5', nickname='10H', result=6.31, who=PARRAGH2018, is_optimal=False)
]
instance_dirs = ['/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case20',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case50',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case80']
instance_dict = {instance.name: instance for instance in instance_data}
print_data = []
instance_pattern = re.compile(r'case_(?P<case>\d+)_(?P<visits>\d+)_(?P<carers>\d+)_(?P<synchronized_visits>\d+)_(?P<windows>\d+)')
instance_counter = 1
last_visits = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for instance_dir in instance_dirs:
for instance in instance_data:
instance_log_path = os.path.join(instance_dir, instance.name + '.dat.err.log')
if not os.path.exists(instance_log_path):
continue
solver_logs = read_traces(instance_log_path)
if not solver_logs:
continue
instance = instance_dict[instance.name]
name_match = instance_pattern.match(instance.name)
if not name_match:
continue
first_solver_logs = solver_logs[0]
case = int(name_match.group('case'))
visits = int(name_match.group('visits'))
carers = int(name_match.group('carers'))
synchronized_visits = int(name_match.group('synchronized_visits'))
windows_configuration = int(name_match.group('windows'))
problem_meta = ProblemMetadata(case, visits, windows_configuration)
if last_visits and last_visits != visits:
instance_counter = 1
normalized_result = float('inf')
if first_solver_logs.best_cost(3) < 100:
normalized_result = round(first_solver_logs.best_cost(3), 2)
delta = round((instance.result - normalized_result) / instance.result * 100, 2)
printable_literature_result = str(instance.result)
if instance.is_optimal:
printable_literature_result += '*'
printable_literature_result += 'cite{{{0}}}'.format(instance.who)
print_data.append(collections.OrderedDict(
metadata=problem_meta,
problem=problem_meta.label,
case=instance_counter,
v1=visits - 2 * synchronized_visits,
v2=synchronized_visits,
carers=carers,
time_windows=problem_meta.windows_label,
literature_result=printable_literature_result,
result=normalized_result,
delta=delta,
time=round(first_solver_logs.best_cost_time(3).total_seconds(), 2) if normalized_result != float('inf') else float('inf')
))
last_visits = visits
instance_counter += 1
print_data.sort(key=lambda dict_obj: dict_obj['metadata'])
print(tabulate.tabulate(
pandas.DataFrame(data=print_data)[['problem', 'carers', 'v1', 'v2', 'literature_result', 'result', 'time', 'delta']],
showindex=False,
tablefmt='latex', headers='keys'))
def compare_planner_optimizer_quality(args, settings):
data_file = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file)
figsize = (2.5, 5)
labels = ['Planners', 'Algorithm']
data_frame['travel_time'] = data_frame['Travel Time'].apply(parse_pandas_duration)
data_frame['span'] = data_frame['Span'].apply(parse_pandas_duration)
data_frame['overtime'] = data_frame['Overtime'].apply(parse_pandas_duration)
data_frame_planners = data_frame[data_frame['Type'] == 'Planners']
data_frame_solver = data_frame[data_frame['Type'] == 'Solver']
overtime_per_carer = [list((data_frame_planners['overtime'] / data_frame_planners['Carers']).values),
list((data_frame_solver['overtime'] / data_frame_solver['Carers']).values)]
def to_matplotlib_minutes(value):
return value * 60 * 1000000000
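# to_matplotlib_minutes converts minutes to nanoseconds, the unit pandas timedeltas use internally
# and hence the scale of the y-axis values formatted by the timedelta formatter applied below.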
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(overtime_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Overtime per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(10), to_matplotlib_minutes(20), to_matplotlib_minutes(30)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_overtime')
travel_time_per_carer = [list((data_frame_planners['travel_time'] / data_frame_planners['Carers']).values),
list((data_frame_solver['travel_time'] / data_frame_solver['Carers']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(travel_time_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Travel Time per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(30), to_matplotlib_minutes(60),
to_matplotlib_minutes(90), to_matplotlib_minutes(120)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_travel_time')
span_per_client = [list((data_frame_planners['span'] / data_frame_planners['Clients']).values),
list((data_frame_solver['span'] / data_frame_solver['Clients']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(span_per_client, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Visit Span per Client [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(6 * 60), to_matplotlib_minutes(7 * 60), to_matplotlib_minutes(8 * 60),
to_matplotlib_minutes(9 * 60)])
ax.set_ylim(bottom=6 * 60 * 60 * 1000000000)
fig.tight_layout()
rows.plot.save_figure('quality_span')
teams = [list(data_frame_planners['Teams'].values), list(data_frame_solver['Teams'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(teams, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Teams of 2 Carers')
fig.tight_layout()
rows.plot.save_figure('quality_teams')
better_matching = [list(data_frame_planners['Better Matching'].values),
list(data_frame_solver['Better Matching'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(better_matching, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Better Client-Carer Matching')
fig.tight_layout()
rows.plot.save_figure('quality_matching')
def parse_percent(value):
value_to_use = value.replace('%', '')
return float(value_to_use) / 100.0
def parse_duration_seconds(value):
return datetime.timedelta(seconds=value)
def compare_benchmark(args, settings):
data_file_path = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file_path)
data_frame['relative_cost_difference'] = data_frame['Relative Cost Difference'].apply(parse_percent)
data_frame['relative_gap'] = data_frame['Relative Gap'].apply(parse_percent)
data_frame['time'] = data_frame['Time'].apply(parse_duration_seconds)
matplotlib.rcParams.update({'font.size': 18})
labels = ['MS', 'IP']
low_labels = ['Gap', 'Delta', 'Time']
cp_frame = data_frame[data_frame['Solver'] == 'CP']
mip_frame = data_frame[data_frame['Solver'] == 'MIP']
def get_series(frame, configuration):
num_visits, num_visits_of_2 = configuration
filtered_frame = frame[(frame['Visits'] == num_visits) & (frame['Synchronized Visits'] == num_visits_of_2)]
return [filtered_frame['relative_gap'].values, filtered_frame['relative_cost_difference'].values,
filtered_frame['time'].values]
def seconds(value):
return value * 1000000000
def minutes(value):
return 60 * seconds(value)
def hours(value):
return 3600 * seconds(value)
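# seconds(), minutes() and hours() express durations in nanoseconds, matching the unit
# pandas.to_timedelta() expects in format_timedelta_pandas() and the computation-time axes below.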
limit_configurations = [[[None, minutes(1) + seconds(15)], [0, minutes(9)]],
[[None, minutes(1) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]],
[[0, minutes(3) + seconds(30)], [0, hours(4) + minutes(30)]]]
yticks_configurations = [
[[0, seconds(15), seconds(30), seconds(45), minutes(1)], [0, minutes(1), minutes(2), minutes(4), minutes(8)]],
[[0, seconds(15), seconds(30), seconds(45), minutes(1), minutes(1) + seconds(15)],
[0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]],
[[0, minutes(1), minutes(2), minutes(3)], [0, hours(1), hours(2), hours(3), hours(4)]]]
problem_configurations = [(25, 0), (25, 5), (50, 0), (50, 10)]
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
return '{0:01d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
def format_percent(x, pos=None):
return int(x * 100.0)
for index, problem_config in enumerate(problem_configurations):
fig, axes = matplotlib.pyplot.subplots(1, 2)
cp_gap, cp_delta, cp_time = get_series(cp_frame, problem_config)
mip_gap, mip_delta, mip_time = get_series(mip_frame, problem_config)
cp_time_limit, mip_time_limit = limit_configurations[index]
cp_yticks, mip_yticks = yticks_configurations[index]
cp_ax, mip_ax = axes
first_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR),
boxprops=dict(color=FOREGROUND_COLOR),
whiskerprops=dict(color=FOREGROUND_COLOR),
capprops=dict(color=FOREGROUND_COLOR))
second_color_config = dict(flierprops=dict(marker='.'),
medianprops=dict(color=FOREGROUND_COLOR2),
boxprops=dict(color=FOREGROUND_COLOR2),
whiskerprops=dict(color=FOREGROUND_COLOR2),
capprops=dict(color=FOREGROUND_COLOR2))
cp_ax.boxplot([cp_gap, cp_delta, []], **second_color_config)
cp_twinx = cp_ax.twinx()
cp_twinx.boxplot([[], [], cp_time], **first_color_config)
cp_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
cp_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
cp_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
cp_ax.set_xlabel('Multistage')
cp_ax.set_xticklabels(low_labels, rotation=45)
cp_ax.set_ylim(bottom=-0.05, top=1)
cp_ax.set_ylabel('Delta, Gap [%]')
cp_twinx.set_ylim(bottom=cp_time_limit[0], top=cp_time_limit[1])
if cp_yticks:
cp_twinx.set_yticks(cp_yticks)
mip_ax.boxplot([mip_gap, mip_delta, []], **second_color_config)
mip_twinx = mip_ax.twinx()
mip_twinx.boxplot([[], [], mip_time], **first_color_config)
mip_twinx.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
mip_twinx.tick_params(axis='y', labelcolor=FOREGROUND_COLOR)
mip_ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_percent))
mip_ax.set_xlabel('IP')
mip_ax.set_xticklabels(low_labels, rotation=45)
mip_ax.set_ylim(bottom=-0.05, top=1)
mip_twinx.set_ylabel('Computation Time [H:MM:SS]', color=FOREGROUND_COLOR)
mip_twinx.set_ylim(bottom=mip_time_limit[0], top=mip_time_limit[1])
if mip_yticks:
mip_twinx.set_yticks(mip_yticks)
fig.tight_layout(w_pad=0.0)
rows.plot.save_figure('benchmark_boxplot_{0}_{1}'.format(problem_config[0], problem_config[1]))
matplotlib.pyplot.cla()
matplotlib.pyplot.close(fig)
def old_debug(args, settings):
problem = rows.plot.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
solution_file = get_or_raise(args, __SOLUTION_FILE_ARG)
schedule = rows.plot.load_schedule(solution_file)
schedule_date = schedule.metadata.begin
carer_diaries = {
carer_shift.carer.sap_number:
next((diary for diary in carer_shift.diaries if diary.date == schedule_date), None)
for carer_shift in problem.carers}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
data_set = []
with rows.plot.create_routing_session() as session:
for route in schedule.routes():
travel_time = datetime.timedelta()
for source, destination in route.edges():
source_loc = location_finder.find(source.visit.service_user)
if not source_loc:
logging.error('Failed to resolve location of %s', source.visit.service_user)
continue
destination_loc = location_finder.find(destination.visit.service_user)
if not destination_loc:
logging.error('Failed to resolve location of %s', destination.visit.service_user)
continue
distance = session.distance(source_loc, destination_loc)
if distance is None:
logging.error('Distance cannot be estimated between %s and %s', source_loc, destination_loc)
continue
travel_time += datetime.timedelta(seconds=distance)
service_time = datetime.timedelta()
for visit in route.visits:
if visit.check_in and visit.check_out:
observed_duration = visit.check_out - visit.check_in
if observed_duration.days < 0:
logging.error('Observed duration %s is negative', observed_duration)
service_time += observed_duration
else:
logging.warning(
'Visit %s is missing check-in and check-out information',
visit.key)
service_time += visit.duration
available_time = functools.reduce(operator.add, (event.duration
for event in
carer_diaries[route.carer.sap_number].events))
data_set.append([route.carer.sap_number,
available_time,
service_time,
travel_time,
float(service_time.total_seconds() + travel_time.total_seconds())
/ available_time.total_seconds()])
data_set.sort(key=operator.itemgetter(4))
data_frame = pandas.DataFrame(columns=['Carer', 'Availability', 'Service', 'Travel', 'Usage'], data=data_set)
figure, axis = matplotlib.pyplot.subplots()
indices = numpy.arange(len(data_frame.index))
time_delta_converter = rows.plot.TimeDeltaConverter()
width = 0.35
travel_series = numpy.array(time_delta_converter(data_frame.Travel))
service_series = numpy.array(time_delta_converter(data_frame.Service))
idle_overtime_series = list(data_frame.Availability - data_frame.Travel - data_frame.Service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
service_handle = axis.bar(indices, service_series, width, bottom=time_delta_converter.zero)
travel_handle = axis.bar(indices, travel_series, width,
bottom=service_series + time_delta_converter.zero_num)
idle_handle = axis.bar(indices, idle_series, width,
bottom=service_series + travel_series + time_delta_converter.zero_num)
overtime_handle = axis.bar(indices, overtime_series, width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend((travel_handle, service_handle, idle_handle, overtime_handle),
('Travel', 'Service', 'Idle', 'Overtime'), loc='upper right')
matplotlib.pyplot.show()
def show_working_hours(args, settings):
__WIDTH = 0.25
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
shift_file = get_or_raise(args, __FILE_ARG)
shift_file_base_name, shift_file_ext = os.path.splitext(os.path.basename(shift_file))
output_file_base_name = getattr(args, __OUTPUT, shift_file_base_name)
__EVENT_TYPE_OFFSET = {'assumed': 2, 'contract': 1, 'work': 0}
__EVENT_TYPE_COLOR = {'assumed': color_map.colors[0], 'contract': color_map.colors[4], 'work': color_map.colors[2]}
handles = {}
frame = pandas.read_csv(shift_file)
dates = frame['day'].unique()
for current_date in dates:
frame_to_use = frame[frame['day'] == current_date]
carers = frame_to_use['carer'].unique()
figure, axis = matplotlib.pyplot.subplots()
try:
current_date_to_use = datetime.datetime.strptime(current_date, '%Y-%m-%d')
carer_index = 0
for carer in carers:
carer_frame = frame_to_use[frame_to_use['carer'] == carer]
axis.bar(carer_index + 0.25, 24 * 3600, 0.75, bottom=0, color='grey', alpha=0.3)
for index, row in carer_frame.iterrows():
event_begin = datetime.datetime.strptime(row['begin'], '%Y-%m-%d %H:%M:%S')
event_end = datetime.datetime.strptime(row['end'], '%Y-%m-%d %H:%M:%S')
handle = axis.bar(carer_index + __EVENT_TYPE_OFFSET[row['event type']] * __WIDTH,
(event_end - event_begin).total_seconds(),
__WIDTH,
bottom=(event_begin - current_date_to_use).total_seconds(),
color=__EVENT_TYPE_COLOR[row['event type']])
handles[row['event type']] = handle
carer_index += 1
axis.legend([handles['work'], handles['contract'], handles['assumed']],
['Worked', 'Available', 'Forecast'], loc='upper right')
axis.grid(linestyle='dashed')
axis.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_time))
axis.yaxis.set_ticks(numpy.arange(0, 24 * 3600, 2 * 3600))
axis.set_ylim(6 * 3600, 24 * 60 * 60)
rows.plot.save_figure(output_file_base_name + '_' + current_date)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compute_overtime(frame):
idle_overtime_series = list(frame.Availability - frame.Travel - frame.Service)
idle_series = numpy.array(
list(map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(list(map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(), idle_overtime_series)))
return overtime_series
class Node:
def __init__(self,
index: int,
next: int,
visit: rows.model.visit.Visit,
visit_start_min: datetime.datetime,
visit_start_max: datetime.datetime,
break_start: typing.Optional[datetime.datetime],
break_duration: datetime.timedelta,
travel_duration: datetime.timedelta):
self.__index = index
self.__next = next
self.__visit = visit
self.__visit_start_min = visit_start_min
self.__visit_start_max = visit_start_max
self.__break_start = break_start
self.__break_duration = break_duration
self.__travel_duration = travel_duration
@property
def index(self) -> int:
return self.__index
@property
def next(self) -> int:
return self.__next
@property
def visit_key(self) -> int:
return self.__visit.key
@property
def visit_start(self) -> datetime.datetime:
return datetime.datetime.combine(self.__visit.date, self.__visit.time)
@property
def visit_start_min(self) -> datetime.datetime:
return self.__visit_start_min
@property
def visit_start_max(self) -> datetime.datetime:
return self.__visit_start_max
@property
def carer_count(self) -> int:
return self.__visit.carer_count
@property
def visit_duration(self) -> datetime.timedelta:
return self.__visit.duration
@property
def break_start(self) -> datetime.datetime:
return self.__break_start
@property
def break_duration(self) -> datetime.timedelta:
return self.__break_duration
@property
def travel_duration(self) -> datetime.timedelta:
return self.__travel_duration
@property
def service_user(self) -> str:
return self.__visit.service_user
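# Mapping indexes every node of the solved routes, groups them per carer, pairs sibling nodes of
# multi-carer (synchronised) visits that share the same time window, and exposes the precedence
# structure as a directed graph via graph().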
class Mapping:
def __init__(self, routes, problem, settings, time_window_span):
self.__index_to_node = {}
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
local_routes = {}
current_index = 0
def find_visit(item) -> rows.model.visit.Visit:
current_diff = sys.maxsize
visit_match = None
for visit_batch in problem.visits:
if visit_batch.service_user != item.service_user:
continue
for visit in visit_batch.visits:
if visit.date != item.date or visit.tasks != item.tasks:
continue
if item.key == visit.key:
# exact match
return visit
visit_total_time = visit.time.hour * 3600 + visit.time.minute * 60
item_total_time = item.time.hour * 3600 + item.time.minute * 60
diff_total_time = abs(visit_total_time - item_total_time)
if diff_total_time <= time_window_span.total_seconds() and diff_total_time < current_diff:
visit_match = visit
current_diff = diff_total_time
assert visit_match is not None
return visit_match
current_index = 0
with rows.plot.create_routing_session() as routing_session:
for route in routes:
local_route = []
previous_visit = None
previous_index = None
current_visit = None
break_start = None
break_duration = datetime.timedelta()
for item in route.nodes:
if isinstance(item, rows.model.past_visit.PastVisit):
current_visit = item.visit
if previous_visit is None:
if break_start is None:
diary = problem.get_diary(route.carer, current_visit.date)
break_start = diary.events[0].begin - datetime.timedelta(minutes=30)
node = Node(current_index,
current_index + 1,
rows.model.visit.Visit(date=current_visit.date,
time=break_start,
duration=datetime.timedelta(),
service_user=current_visit.service_user),
break_start,
break_start,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[current_index] = node
local_route.append(node)
current_index += 1
previous_visit = current_visit
previous_index = current_index
break_start = None
break_duration = datetime.timedelta()
current_index += 1
continue
previous_location = user_tag_finder.find(previous_visit.service_user)
current_location = user_tag_finder.find(current_visit.service_user)
travel_time = datetime.timedelta(seconds=routing_session.distance(previous_location, current_location))
previous_visit_match = find_visit(previous_visit)
node = Node(previous_index,
current_index,
previous_visit,
previous_visit_match.datetime - time_window_span,
previous_visit_match.datetime + time_window_span,
break_start,
break_duration,
travel_time)
self.__index_to_node[previous_index] = node
local_route.append(node)
break_start = None
break_duration = datetime.timedelta()
previous_visit = current_visit
previous_index = current_index
current_index += 1
if isinstance(item, rows.model.rest.Rest):
if break_start is None:
break_start = item.start_time
else:
break_start = item.start_time - break_duration
break_duration += item.duration
visit_match = find_visit(previous_visit)
node = Node(previous_index,
-1,
previous_visit,
visit_match.datetime - time_window_span,
visit_match.datetime + time_window_span,
break_start,
break_duration,
datetime.timedelta())
self.__index_to_node[previous_index] = node
local_route.append(node)
local_routes[route.carer] = local_route
self.__routes = local_routes
service_user_to_index = collections.defaultdict(list)
for index in self.__index_to_node:
node = self.__index_to_node[index]
service_user_to_index[node.service_user].append(index)
self.__siblings = {}
for service_user in service_user_to_index:
num_indices = len(service_user_to_index[service_user])
for left_pos in range(num_indices):
left_index = service_user_to_index[service_user][left_pos]
left_visit = self.__index_to_node[left_index]
if left_visit.carer_count == 1:
continue
for right_pos in range(left_pos + 1, num_indices):
right_index = service_user_to_index[service_user][right_pos]
right_visit = self.__index_to_node[right_index]
if right_visit.carer_count == 1:
continue
if left_visit.visit_start_min == right_visit.visit_start_min and left_visit.visit_start_max == right_visit.visit_start_max:
assert left_index != right_index
self.__siblings[left_index] = right_index
self.__siblings[right_index] = left_index
def indices(self):
return list(self.__index_to_node.keys())
def routes(self) -> typing.Dict[rows.model.carer.Carer, typing.List[Node]]:
return self.__routes
def node(self, index: int) -> Node:
return self.__index_to_node[index]
def find_index(self, visit_key: int) -> int:
for index in self.__index_to_node:
if self.__index_to_node[index].visit_key == visit_key:
return index
return None
def sibling(self, index: int) -> typing.Optional[Node]:
if index in self.__siblings:
sibling_index = self.__siblings[index]
return self.__index_to_node[sibling_index]
return None
def graph(self) -> networkx.DiGraph:
edges = []
for carer in self.__routes:
for node in self.__routes[carer]:
if node.next != -1:
assert node.index != node.next
edges.append([node.index, node.next])
sibling_node = self.sibling(node.index)
if sibling_node is not None:
if node.index < sibling_node.index:
assert node.index != sibling_node.index
edges.append([node.index, sibling_node.index])
if node.next != -1:
assert sibling_node.index != node.next
edges.append([sibling_node.index, node.next])
return networkx.DiGraph(edges)
def create_mapping(settings, problem, schedule) -> Mapping:
mapping_time_windows_span = datetime.timedelta(minutes=90)
return Mapping(schedule.routes, problem, settings, mapping_time_windows_span)
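# StartTimeEvaluator propagates visit start times through the route graph in topological order,
# synchronising sibling (multi-carer) visits and accounting for breaks and travel durations
# provided by the mapping.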
class StartTimeEvaluator:
def __init__(self, mapping: Mapping, problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
self.__mapping = mapping
self.__problem = problem
self.__schedule = schedule
self.__sorted_indices = list(networkx.topological_sort(self.__mapping.graph()))
self.__initial_start_times = self.__get_initial_start_times()
def get_start_times(self, duration_callback) -> typing.List[datetime.datetime]:
start_times = copy.copy(self.__initial_start_times)
for index in self.__sorted_indices:
node = self.__mapping.node(index)
current_sibling_node = self.__mapping.sibling(node.index)
if current_sibling_node:
max_start_time = max(start_times[node.index], start_times[current_sibling_node.index])
start_times[node.index] = max_start_time
if max_start_time > start_times[current_sibling_node.index]:
start_times[current_sibling_node.index] = max_start_time
if current_sibling_node.next is not None and current_sibling_node.next != -1:
start_times[current_sibling_node.next] = self.__get_next_arrival(current_sibling_node, start_times, duration_callback)
if node.next is None or node.next == -1:
continue
next_arrival = self.__get_next_arrival(node, start_times, duration_callback)
if next_arrival > start_times[node.next]:
start_times[node.next] = next_arrival
return start_times
def get_delays(self, start_times: typing.List[datetime.datetime]) -> typing.List[datetime.timedelta]:
return [start_times[index] - self.__mapping.node(index).visit_start_max for index in self.__mapping.indices()]
def __get_next_arrival(self, local_node: Node, start_times, duration_callback) -> datetime.datetime:
break_done = False
if local_node.break_duration is not None \
and local_node.break_start is not None \
and local_node.break_start + local_node.break_duration <= start_times[local_node.index]:
break_done = True
local_visit_key = self.__mapping.node(local_node.index).visit_key
local_next_arrival = start_times[local_node.index] + duration_callback(local_visit_key) + local_node.travel_duration
if not break_done and local_node.break_start is not None:
if local_next_arrival >= local_node.break_start:
local_next_arrival += local_node.break_duration
else:
local_next_arrival = local_node.break_start + local_node.break_duration
return local_next_arrival
def __get_initial_start_times(self) -> typing.List[datetime.datetime]:
start_times = [self.__mapping.node(index).visit_start_min for index in self.__mapping.indices()]
carer_routes = self.__mapping.routes()
for carer in carer_routes:
diary = self.__problem.get_diary(carer, self.__schedule.date)
assert diary is not None
nodes = carer_routes[carer]
nodes_it = iter(nodes)
first_visit_node = next(nodes_it)
start_min = max(first_visit_node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[first_visit_node.index] = start_min
for node in nodes_it:
start_min = max(node.visit_start_min, diary.events[0].begin - datetime.timedelta(minutes=30))
start_times[node.index] = start_min
return start_times
class EssentialRiskinessEvaluator:
def __init__(self, settings, history, problem, schedule):
self.__settings = settings
self.__history = history
self.__problem = problem
self.__schedule = schedule
self.__schedule_start = datetime.datetime.combine(self.__schedule.date, datetime.time())
self.__mapping = None
self.__sample = None
self.__start_times = None
self.__delay = None
def run(self):
self.__mapping = create_mapping(self.__settings, self.__problem, self.__schedule)
history_time_windows_span = datetime.timedelta(hours=2)
self.__sample = self.__history.build_sample(self.__problem, self.__schedule.date, history_time_windows_span)
self.__start_times = [[datetime.datetime.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
self.__delay = [[datetime.timedelta.max for _ in range(self.__sample.size)] for _ in self.__mapping.indices()]
start_time_evaluator = StartTimeEvaluator(self.__mapping, self.__problem, self.__schedule)
for scenario in range(self.__sample.size):
def get_visit_duration(visit_key: int) -> datetime.timedelta:
if visit_key is None:
return datetime.timedelta()
return self.__sample.visit_duration(visit_key, scenario)
scenario_start_times = start_time_evaluator.get_start_times(get_visit_duration)
delay = start_time_evaluator.get_delays(scenario_start_times)
for index in range(len(scenario_start_times)):
self.__start_times[index][scenario] = scenario_start_times[index]
self.__delay[index][scenario] = delay[index]
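# Note: calculate_index appears to compute a riskiness measure for a visit from its per-scenario
# delays: 0.0 when no scenario is delayed, float('inf') when the positive delays cannot be offset
# by slack in the remaining scenarios, and otherwise the per-scenario delay budget needed to
# balance the accumulated delay.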
def calculate_index(self, visit_key: int) -> float:
visit_index = self.__find_index(visit_key)
records = [local_delay.total_seconds() for local_delay in self.__delay[visit_index]]
records.sort()
num_records = len(records)
if records[num_records - 1] <= 0:
return 0.0
total_delay = 0.0
position = num_records - 1
while position >= 0 and records[position] >= 0:
total_delay += records[position]
position -= 1
if position == -1:
return float('inf')
delay_budget = 0
while position > 0 and delay_budget + float(position + 1) * records[position] + total_delay > 0:
delay_budget += records[position]
position -= 1
delay_balance = delay_budget + float(position + 1) * records[position] + total_delay
if delay_balance < 0:
riskiness_index = min(0.0, records[position + 1])
assert riskiness_index <= 0.0
remaining_balance = total_delay + delay_budget + float(position + 1) * riskiness_index
assert remaining_balance >= 0.0
riskiness_index -= math.ceil(remaining_balance / float(position + 1))
assert riskiness_index * float(position + 1) + delay_budget + total_delay <= 0.0
return -riskiness_index
elif delay_balance > 0:
return float('inf')
else:
return records[position]
def get_delays(self, visit_key) -> typing.List[datetime.timedelta]:
index = self.__find_index(visit_key)
return self.__delay[index]
def find_carer(self, visit_key: int) -> typing.Optional[rows.model.carer.Carer]:
for carer in self.__mapping.routes():
for node in self.__mapping.routes()[carer]:
if node.visit_key == visit_key:
return carer
return None
def find_route(self, index: int) -> typing.Optional[typing.List[Node]]:
routes = self.__mapping.routes()
for carer in routes:
for node in routes[carer]:
if node.index == index:
return routes[carer]
return None
def print_route_for_visit(self, visit_key):
carer = self.find_carer(visit_key)
self.print_route(carer)
def print_route(self, carer):
route = self.__mapping.routes()[carer]
data = [['index', 'key', 'visit_start', 'visit_duration', 'travel_duration', 'break_start', 'break_duration']]
for node in route:
if node.visit_key is None:
duration = 0
else:
duration = int(self.__sample.visit_duration(node.visit_key, 0).total_seconds())
data.append([node.index,
node.visit_key,
int(self.__datetime_to_delta(self.__start_times[node.index][0]).total_seconds()),
duration,
int(node.travel_duration.total_seconds()),
int(self.__datetime_to_delta(node.break_start).total_seconds()) if node.break_start is not None else 0,
int(node.break_duration.total_seconds())])
print(tabulate.tabulate(data))
def print_start_times(self, visit_key: int):
print('Start Times - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number,
int(self.__datetime_to_delta(self.__start_times[selected_index][scenario_number]).total_seconds())))
def print_delays(self, visit_key: int):
print('Delays - Visit {0}:'.format(visit_key))
selected_index = self.__find_index(visit_key)
for scenario_number in range(self.__sample.size):
print('{0:<4}{1}'.format(scenario_number, int(self.__delay[selected_index][scenario_number].total_seconds())))
def visit_keys(self) -> typing.List[int]:
visit_keys = [self.__mapping.node(index).visit_key for index in self.__mapping.indices() if self.__mapping.node(index).visit_key is not None]
visit_keys.sort()
return visit_keys
def __find_index(self, visit_key: int) -> typing.Optional[int]:
for index in self.__mapping.indices():
if self.__mapping.node(index).visit_key == visit_key:
return index
return None
def __datetime_to_delta(self, value: datetime.datetime) -> datetime.timedelta:
return value - self.__schedule_start
def to_frame(self):
records = []
for visit_index in self.__mapping.indices():
visit_key = self.__mapping.node(visit_index).visit_key
if visit_key is None:
continue
for scenario_number in range(self.__sample.size):
records.append({'visit': visit_key, 'scenario': scenario_number, 'delay': self.__delay[visit_index][scenario_number]})
return | pandas.DataFrame(data=records) | pandas.DataFrame |
#dependencies
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import normalize
import itertools
import matplotlib.pyplot as plt
import pandas as pd
#function defination to plot the confusion matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
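# Illustrative usage sketch (variable and class names below are placeholders, not from the original):
#   cm = confusion_matrix(y_test, y_pred)
#   plot_confusion_matrix(cm, classes=['non-criminal', 'criminal'], normalize=True)
#   plt.show()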
train_data = | pd.read_csv('criminal_train.csv') | pandas.read_csv |
""" LSTM MODEL STUFF """
import numpy as np
import scipy.io as sio
import json
import tensorflow as tf
from pandas import DataFrame, Series, concat
from tensorflow.python.keras.layers import Input, Dense, LSTM
from tensorflow.python.keras.models import Sequential
from random import randrange
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import cross_val_score, GridSearchCV, KFold
from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
#-------------------------------------------------------------------------------
# Set keras modules to variables; define helper functions for data processing.
# these functions are modified (slightly) from this tutorial:
# https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
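# Illustrative example of the framing above: with lag=1, timeseries_to_supervised([1, 2, 3])
# returns a two-column frame whose first column is the series shifted by one step (the leading
# NaN filled with 0) and whose second column is the original series, i.e. rows (0, 1), (1, 2), (2, 3).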
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = np.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
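# Training runs one epoch at a time so the stateful LSTM keeps its hidden state across batches
# within an epoch and is explicitly reset between epochs via reset_states().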
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = | concat(cols, axis=1) | pandas.concat |
from __future__ import print_function
# from builtins import str
# from builtins import object
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import os
from .data_utils import make_dir
class XlsxRecorder(object):
"""
xlsx recorder for results
including two recorders: one for the current experiment, recording details of results as they change by iteration;
the other records a summary across different experiments, which is saved to summary_path
1. detailed results: saved in fig_save_path/results.xlsx
** Sheet1: #total_filename x #metrics; along each row direction are the records by iteration
** batch_0: #batch_filename x #metric_by_label , along each column direction, are the records by iteration
** batch_1: same as batch_0
** ......
2. task results: saved in ../data/summary.xlsx
** Sheet1: task_name * #metrics recorded by iteration
"""
def __init__(self, expr_name, saving_path='', folder_name=''):
self.expr_name = expr_name
if not len(saving_path):
self.saving_path = '../data/'+expr_name #saving_path
else:
self.saving_path = saving_path
self.saving_path = os.path.abspath(self.saving_path)
self.folder_name = folder_name
if len(folder_name):
self.saving_path = os.path.join(self.saving_path, folder_name)
"""path of saving excel, default is the same as the path of saving figures"""
self.writer_path = None
self.xlsx_writer = None
self.summary_path = '../data/summary.xlsx'
"""the path for summary, which can record results from different experiments"""
self.measures = ['iou', 'precision', 'recall', 'dice']
"""measures to record"""
self.batch_count = {}
self.row_space = 50
self.column_space = 10
self.start_row = 0
self.summary = None
self.avg_buffer = {}
self.iter_info_buffer = []
self.name_list_buffer = []
self.init_summary()
print("the update space in detailed files is {}".format(self.row_space))
def init_summary(self):
""" init two recorders, initilzation would create a new recorder for this experiment, recording all details
at the same time it would load the data from summary recorder, then it would append the new experiment summary to summary recorder
"""
if not os.path.exists(self.saving_path ):
os.makedirs(self.saving_path )
self.writer_path = os.path.join(self.saving_path, 'results.xlsx')
writer = pd.ExcelWriter(self.writer_path, engine='xlsxwriter')
df = pd.DataFrame([])
df.to_excel(writer)
worksheet = writer.sheets['Sheet1']
worksheet.set_column(1, 1000, 30)
writer.save()
writer.close()
self.writer_book = load_workbook(self.writer_path)
self.xlsx_writer = pd.ExcelWriter(self.writer_path, engine='openpyxl')
self.xlsx_writer.book = self.writer_book
self.xlsx_writer.sheets = dict((ws.title, ws) for ws in self.writer_book.worksheets)
if not os.path.exists(self.summary_path):
writer = pd.ExcelWriter(self.summary_path, engine = 'xlsxwriter')
df = pd.DataFrame([])
df.to_excel(writer)
worksheet = writer.sheets['Sheet1']
worksheet.set_column(1, 1000, 30)
writer.save()
writer.close()
def set_batch_based_env(self,name_list,batch_id):
# need to be set before each saving operation
self.name_list = name_list
self.sheet_name = 'batch_'+ str(batch_id)
if self.sheet_name not in self.batch_count:
self.batch_count[self.sheet_name] = -1
self.name_list_buffer += self.name_list
self.batch_count[self.sheet_name] += 1
count = self.batch_count[self.sheet_name]
self.start_row = count * self.row_space
self.start_column = 0
def set_summary_based_env(self):
self.sheet_name = 'Sheet1'
self.start_row = 0
def put_into_avg_buff(self, result, iter_info):
"""
# avg_buffer is to save avg_results from each iter, from each batch
# iter_info: string contains iter info
# the buffer is organized as { iter_info1: results_list_iter1, iter_info2:results_list_iter2}
# results_list_iter1 : [batch1_res_iter1, batch2_res_iter1]
# batch1_res_iter1:{metric1: result, metric2: result}
"""
if iter_info not in self.avg_buffer:
self.avg_buffer[iter_info] = []
self.iter_info_buffer += [iter_info]
self.avg_buffer[iter_info] += [result]
def merge_from_avg_buff(self):
"""
# iter_info: string contains iter info
# the buffer is organized as { iter_info1: results_list_iter1, iter_info2:results_list_iter2}
# results_list_iter1 : [batch1_res_iter1, batch2_res_iter1]
# batch1_res_iter1:{metric1: result, metric2: result}
# return: dic: {iter_info1:{ metric1: nFile x 1 , metric2:...}, iter_info2:....}
"""
metric_avg_dic={}
for iter_info,avg_list in list(self.avg_buffer.items()):
metric_results_tmp = {metric: [result[metric] for result in avg_list] for metric in
self.measures}
metric_avg_dic[iter_info] = {metric: np.concatenate(metric_results_tmp[metric], 0) for metric in metric_results_tmp}
return metric_avg_dic
def saving_results(self,sched, results=None, info=None, averaged_results=None):
"""
the expected input differs for each sched:
batch: results should be a dict; each measure inside should be B x N_label
buffer: results should be a dict; each measure inside should be N_img x 1
summary: no input needed; the summary is built from the buffer
:param results:
:param sched:
:param info:
:return:
"""
if sched == 'batch':
label_info = info['label_info']
iter_info = info['iter_info']
self.saving_all_details(results,averaged_results,label_info,iter_info)
elif sched == 'buffer':
iter_info = info['iter_info']
self.put_into_avg_buff(results,iter_info)
elif sched == 'summary':
self.summary_book = load_workbook(self.summary_path)
self.summary_writer = pd.ExcelWriter(self.summary_path,engine='openpyxl')
self.set_summary_based_env()
metric_avg_dic = self.merge_from_avg_buff()
self.saving_label_averaged_results(metric_avg_dic)
self.saving_summary(metric_avg_dic)
self.save_figs_for_batch(metric_avg_dic)
self.xlsx_writer.close()
self.summary_writer.close()
else:
raise ValueError("saving method not implemented")
def saving_label_averaged_results(self, results):
"""
# saved by iteration
# results: dic: {iter_info1:{ metric1: nFile x 1 , metric2:...}, iter_info2:....}
# saving the n_File*nAvgMetrics into xlsx_writer
# including the iter_info
"""
start_column = 0
results_summary = {iter_info: {metric:np.mean(results[iter_info][metric]).reshape(1,1) for metric in self.measures} for iter_info in self.iter_info_buffer}
for iter_info in self.iter_info_buffer:
iter_expand = {metric: np.squeeze(np.concatenate((results[iter_info][metric], results_summary[iter_info][metric]), 0)) for metric in self.measures}
df = pd.DataFrame.from_dict(iter_expand)
df = df[self.measures]
try:
df.index = | pd.Index(self.name_list_buffer+['average']) | pandas.Index |
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_farm as wf
import windpowerlib.wind_turbine as wt
import windpowerlib.wind_turbine_cluster as wtc
import windpowerlib.turbine_cluster_modelchain as tc_mc
class TestTurbineClusterModelChain:
@classmethod
def setup_class(self):
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack((temperature_2m,
temperature_10m,
pressure_0m,
wind_speed_8m,
wind_speed_10m,
roughness_length)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature',
'pressure',
'wind_speed',
'wind_speed',
'roughness_length']),
np.array([2, 10, 0, 8, 10, 0])])
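# The weather frame uses a two-level column index of (variable name, height); heights appear to
# be given in metres (e.g. wind_speed at 8 m and 10 m).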
self.test_turbine = {'hub_height': 100,
'rotor_diameter': 80,
'turbine_type': 'E-126/4200'}
self.test_turbine_2 = {'hub_height': 90,
'rotor_diameter': 60,
'turbine_type': 'V90/2000',
'nominal_power': 2000000.0}
self.test_farm = {'wind_turbine_fleet': [
{'wind_turbine':
wt.WindTurbine(**self.test_turbine),
'number_of_turbines': 3}]}
self.test_farm_2 = {'name': 'test farm',
'wind_turbine_fleet':
[{'wind_turbine':
wt.WindTurbine(**self.test_turbine),
'number_of_turbines': 3},
{'wind_turbine':
wt.WindTurbine(**self.test_turbine_2),
'number_of_turbines': 3}]}
self.test_cluster = {'name': 'example_cluster',
'wind_farms': [wf.WindFarm(**self.test_farm),
wf.WindFarm(**self.test_farm_2)]}
def test_run_model(self):
parameters = {'wake_losses_model': 'dena_mean',
'smoothing': False,
'standard_deviation_method': 'turbulence_intensity',
'smoothing_order': 'wind_farm_power_curves'}
# Test modelchain with default values
power_output_exp = pd.Series(data=[4198361.4830405945,
8697966.121234536],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=wf.WindFarm(**self.test_farm), **parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
# Test constant efficiency
parameters['wake_losses_model'] = 'wind_farm_efficiency'
test_wind_farm = wf.WindFarm(**self.test_farm)
test_wind_farm.efficiency = 0.9
power_output_exp = pd.Series(data=[4420994.806920091,
8516983.651623568],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=test_wind_farm, **parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
# Test smoothing
parameters['smoothing'] = True
test_wind_farm = wf.WindFarm(**self.test_farm)
test_wind_farm.efficiency = 0.9
power_output_exp = pd.Series(data=[4581109.03847444,
8145581.914240712],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=test_wind_farm, **parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
# Test wind farm with different turbine types (smoothing)
test_wind_farm = wf.WindFarm(**self.test_farm_2)
test_wind_farm.efficiency = 0.9
power_output_exp = pd.Series(data=[6777087.9658657005,
12180374.036660176],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=test_wind_farm, **parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
# Test other smoothing order
parameters['smoothing_order'] = 'turbine_power_curves'
test_wind_farm = wf.WindFarm(**self.test_farm_2)
test_wind_farm.efficiency = 0.9
power_output_exp = pd.Series(data=[6790706.001026006,
12179417.461328149],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=test_wind_farm, **parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
def test_run_model_turbine_cluster(self):
parameters = {'wake_losses_model': 'dena_mean',
'smoothing': False,
'standard_deviation_method': 'turbulence_intensity',
'smoothing_order': 'wind_farm_power_curves'}
# Test modelchain with default values
power_output_exp = pd.Series(data=[10363047.755401008,
21694496.68221325],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=wtc.WindTurbineCluster(**self.test_cluster),
**parameters)
test_tc_mc.run_model(self.weather_df)
assert_series_equal(test_tc_mc.power_output, power_output_exp)
# Test constant efficiency
parameters['wake_losses_model'] = 'wind_farm_efficiency'
test_cluster = wtc.WindTurbineCluster(**self.test_cluster)
for farm in test_cluster.wind_farms:
farm.efficiency = 0.9
power_output_exp = pd.Series(data=[10920128.570572512,
21273144.336885825],
name='feedin_power_plant')
test_tc_mc = tc_mc.TurbineClusterModelChain(
power_plant=test_cluster, **parameters)
test_tc_mc.run_model(self.weather_df)
| assert_series_equal(test_tc_mc.power_output, power_output_exp) | pandas.util.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Ivar
"""
import sys
import os
#from scipy import interp
import pandas as pd
import numpy as np
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, plot_confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
import xgboost as xgb
from thundersvm import SVC
from Util import *
from multiprocessing import Pool, Manager, Process, Lock
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
#from sklearn.feature_selection import SelectKBest
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import GenericUnivariateSelect
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import RFECV
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LassoCV
from sklearn import datasets, linear_model
from genetic_selection import GeneticSelectionCV
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.svm import SVR
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE, RandomOverSampler, ADASYN
from imblearn.under_sampling import RandomUnderSampler
from mlxtend.classifier import EnsembleVoteClassifier
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
#from pickle import dump, load
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import cross_validate
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier
import time
import matplotlib.pyplot as plt
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from mlxtend.plotting import plot_confusion_matrix
class Classification:
def __init__(self, dat):
self.dat = dat
#self.evals = manager.list([{} for i in range(len(self.fetures)*len(self.dat["norms"]))])
def execute(self):
outdir = self.dat["outputdir"]+Util.now()
Util.makedir(outdir)
df = pd.read_csv(self.dat["csvfile"])
columns = df.columns.tolist()
print(df)
print(columns)
#exit()
#prefixes = ('lbp-')
#prefixes = ('LPB')
prefixes = ()
columns.remove("image")
columns.remove("target")
#print(columns)
for word in columns[:]:
if word.startswith(prefixes):
columns.remove(word)
"""
cc = 0
for word in columns[:]:
if word.startswith("log-"): #1023
#if word.startswith("wavelet"): 372
#if word.startswith("lbp-"):# 93
#if word.startswith("LPB_"): 102
cc+=1
print("cc", cc)
"""
classes = list(enumerate(df.target.astype('category').cat.categories))
classes = [dd[1] for dd in classes]
print("datcat", classes)
evals = {}
fig = plt.figure(figsize=(5,5))
plt.title('')
plt.plot([0, 1], [0, 1],'r--')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
Xo = df[columns]
Y = df.target.astype('category').cat.codes
Xo = Xo.loc[:,Xo.apply(pd.Series.nunique) != 1]
#Classification.featureSelection2(columns, Xo, Y, self.dat["classifiers"], outdir)
cmdats = {}
#iskfold = True
#iskfold = False
fpr, tpr = None, None
for name, argms in self.dat["classifiers"].items():
clsr = Classification.classifiers()
if name in clsr:
print("len(columns)", len(columns))
X = Xo.copy(deep=True)
if argms["scale"] != "None":
scaler = Classification.getScale(argms["scale"])
X = pd.DataFrame(scaler.fit_transform(X))
#sel = VarianceThreshold(threshold=0.12)
#trainX = sel.fit_transform(trainX)
trainX, testX, trainY, testY = train_test_split(
X, Y, stratify=Y, test_size=self.dat["testing"], random_state=7)
"""
if argms["scale"] != "None":
scaler = Classification.getScale(argms["scale"])
trainX = pd.DataFrame(scaler.fit_transform(trainX))
testX = pd.DataFrame(scaler.transform(testX))
"""
m = clsr[name]
clf = m["model"]
if len(argms["modelparameters"])>0:
clf = clf.set_params(**argms["modelparameters"])
scores = Classification.evaluation_tmp()
if self.dat["iskfold"]:
ytrue, ypred, fpr, tpr, roc_auc = Classification.kfolfcv(clf, X, Y, scores)
print("scoers", scores)
Classification.evaluationmean(scores)
evals[name]={"metrics":scores, "ytrue":ytrue, "ypred":ypred}
cmdats[name] = {"ytrue":ytrue, "ypred":ypred}
print(name, evals[name]["metrics"])
else:
#xx% train yy% test
#training
clf.fit(trainX, trainY)
#testing
pre = clf.predict(testX)
Classification.evaluation(scores, testY, pre)
Classification.evaluationmean(scores)
evals[name]={"metrics":scores, "ytrue":testY.tolist(), "ypred":pre.tolist()}
cmdats[name] = {"ytrue":testY, "ypred":pre}
print(name, evals[name]["metrics"])
# ROC-AUC curve
probs = clf.predict_proba(testX)
preds = probs[:,1]
fpr, tpr, threshold = metrics.roc_curve(testY, preds)
roc_auc = metrics.auc(fpr, tpr)
print("roc_auc", roc_auc)
#plot auc-roc curves
plt.plot(fpr, tpr, label = name+' (AUC) = %0.2f' % roc_auc)
#save best parameters
if hasattr(clf, 'best_params_'):
print(name, clf.best_params_)
#save fpr, tpr
#FPR TPR
fprtprdf = pd.DataFrame({"FPR":fpr, "TPR":tpr})
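# A plausible next step (assumption, not part of the original snippet) is to write the
# ROC points next to the other run outputs, e.g.:
# fprtprdf.to_csv(os.path.join(outdir, name + "_fpr_tpr.csv"), index=False)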
#!/usr/bin/env python3
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pandas.testing as pdtest
from pandas.api.types import is_datetime64_dtype
from sklearn.base import TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_array, check_is_fitted
from datafold.pcfold import InitialCondition, TSCDataFrame, TSCMetric, TSCScoring
from datafold.pcfold.timeseries.collection import TSCException
from datafold.utils.general import if1dim_rowvec
# types allowed for transformation
TransformType = Union[TSCDataFrame, np.ndarray]
# types allowed for time predictions
TimePredictType = TSCDataFrame
InitialConditionType = Union[TSCDataFrame, np.ndarray]
class TSCBaseMixin(object):
"""Base class for Mixin's in *datafold*.
See Also
--------
:py:class:`.TSCTransformerMixin`
:py:class:`.TSCPredictMixin`
"""
def _has_feature_names(self, _obj):
# True, for pandas.DataFrame or TSCDataFrame
return isinstance(_obj, pd.DataFrame)
def _read_fit_params(self, attrs: Optional[List[Tuple[str, Any]]], fit_params):
return_values = []
if attrs is not None:
for attr in attrs:
return_values.append(fit_params.pop(attr[0], attr[1]))
if fit_params != {}:
raise KeyError(f"fit_params.keys = {fit_params.keys()} not supported")
if len(return_values) == 0:
return None
elif len(return_values) == 1:
return return_values[0]
else:
return return_values
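# Usage sketch (illustrative, not from the original source; the "tolerance" key is hypothetical):
# _read_fit_params pops each requested key from fit_params, falls back to the supplied
# default, and raises KeyError if unrecognised keys remain.
# >>> TSCBaseMixin()._read_fit_params([("tolerance", 1e-8)], {"tolerance": 1e-6})
# 1e-06
# >>> TSCBaseMixin()._read_fit_params([("tolerance", 1e-8)], {"unexpected": 1})  # raises KeyError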
def _X_to_numpy(self, X):
"""Returns a numpy array of the data."""
if self._has_feature_names(X):
X = X.to_numpy()
# a row in a df is always a single sample (which requires to be
# represented in a 2D matrix)
return if1dim_rowvec(X)
else:
return X
def _check_attributes_set_up(self, check_attributes):
try:
check_is_fitted(
self,
attributes=check_attributes,
)
except NotFittedError:
raise RuntimeError(
f"{check_attributes} are not available for estimator {self}. "
f"Please report bug."
)
def _validate_datafold_data(
self,
X: Union[TSCDataFrame, np.ndarray],
ensure_tsc: bool = False,
array_kwargs: Optional[dict] = None,
tsc_kwargs: Optional[dict] = None,
):
"""Provides a general function to validate data that is input to datafold
functions -- it can be overwritten if a concrete implementation requires
different checks.
This function is very close to scikit-learn BaseEstimator._validate_data (which
was introduced in 0.23.1).
Parameters
----------
X
ensure_tsc
array_kwargs
tsc_kwargs
Returns
-------
"""
# defaults to empty dictionary if None
array_kwargs = array_kwargs or {}
tsc_kwargs = tsc_kwargs or {}
if ensure_tsc and not isinstance(X, TSCDataFrame):
raise TypeError(
f"Input 'X' is of type {type(X)} but a TSCDataFrame is required."
)
if type(X) != TSCDataFrame:
# Currently, everything that is not strictly a TSCDataFrame will go the
# path of an usual array format. This includes:
# * sparse scipy matrices
# * numpy ndarray
# * memmap
# * pandas.DataFrame (Note a TSCDataFrame is also a pandas.DataFrame,
# but not strictly)
tsc_kwargs = {} # no need to check -> overwrite to empty dict
if type(X) == pd.DataFrame:
# special handling of pandas.DataFrame (strictly, not including
# TSCDataFrame) --> keep the type (recover after validation).
assert isinstance(X, pd.DataFrame) # mypy checking
revert_to_data_frame = True
idx, col = X.index, X.columns
else:
revert_to_data_frame = False
idx, col = [None] * 2
X = check_array(
X,
accept_sparse=array_kwargs.pop("accept_sparse", False),
accept_large_sparse=array_kwargs.pop("accept_large_sparse", False),
dtype=array_kwargs.pop("dtype", "numeric"),
order=array_kwargs.pop("order", None),
copy=array_kwargs.pop("copy", False),
force_all_finite=array_kwargs.pop("force_all_finite", True),
ensure_2d=array_kwargs.pop("ensure_2d", True),
allow_nd=array_kwargs.pop("allow_nd", False),
ensure_min_samples=array_kwargs.pop("ensure_min_samples", 1),
ensure_min_features=array_kwargs.pop("ensure_min_features", 1),
estimator=self,
)
if revert_to_data_frame:
X = pd.DataFrame(X, index=idx, columns=col)
import pymongo
import numpy as np
import pandas as pd
from sys import argv
# Set up mongodb database
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["product_Durability_db"]
mycol = mydb[argv[1]]
# myquery = {"address": "Park Lane 38"}
# mydoc = mycol.find(myquery)
# Set up matrix for distance_origin
continents = ["America", "Europe", "Asia", "Africa", "Australia"]
d = {'America': [0, 2, 2, 2, 2], 'Europe': [2, 0, 1, 1, 3], 'Asia': [2, 1, 0, 1, 1],
'Africa': [2, 1, 1, 0, 2], 'Australia': [2, 3, 1, 2, 0]}
mat_continents = pd.DataFrame(d, columns=continents, index=continents)
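# Quick sanity check of the distance matrix (illustrative; values come from the dict d above):
# it is symmetric with a zero diagonal.
assert mat_continents.loc["Europe", "Asia"] == 1
assert mat_continents.loc["America", "America"] == 0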
import tehran_stocks.config as db
import matplotlib.pyplot as plt
from tehran_stocks import Stocks
import pandas as pd
import matplotlib.ticker as mtick
from bidi.algorithm import get_display
import arabic_reshaper
import pathlib
def histogram_value(history_len):
q = f"select date_shamsi,SUM(value) as value from stock_price group by dtyyyymmdd order by date_shamsi desc limit {history_len} "
data = pd.read_sql(q, db.engine)
ax = plt.gca()
data.plot(kind='bar', x='date_shamsi', y='value', ax=ax, color='green')
reshaped_text = \
arabic_reshaper.reshape("ارزش معادلات کل")
text = get_display(reshaped_text)
ax.set_title(text)
plt.savefig(f'../../output/reports/ارزش معادلات کل.png')
ax.cla()
return data
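# Example call (illustrative): histogram_value(30) aggregates the last 30 trading days,
# writes the bar chart to ../../output/reports/, and returns the aggregated DataFrame.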
def histogram_value_grouped(history_len):
print(pathlib.Path(__file__).parent.absolute())
print(pathlib.Path().absolute())
q = f"select date_shamsi,SUM(value) as value from stock_price group by dtyyyymmdd order by date_shamsi desc limit {history_len} "
total = pd.read_sql(q, db.engine)
codes = db.session.query(db.distinct(Stocks.group_code)).all()
for i, code in enumerate(codes):
print(code[0])
q = f"""select date_shamsi,group_name,SUM(value) as value
from(select * from stock_price Stock_price , stocks Stock where Stock.code = Stock_price.code) where group_code == {code[0]}
group by dtyyyymmdd,group_code order by date_shamsi desc limit {history_len}
"""
data = pd.read_sql(q, db.engine)
result = pd.merge(total, data, on='date_shamsi')
# NOTE: It is the historian's job to make sure that keywords are not repetitive (they are
# otherwise double-counted into counts).
from collections import defaultdict
from collections import OrderedDict
import os
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
from unidecode import unidecode
import csv
from bs4 import BeautifulSoup, Tag
import sys
import json
NUM_TOP_WORDS = 20 # The number of top words that we want from each file
CONTEXT_WORDS_AROUND = 50
MAX_EXCLUDE_REGEX_LENGTH = 50
punctuation = ['\.', '/', '\?', '\-', '"', ',', '\\b'] # Punctuation we use within our regexes
data_dirname = os.getcwd() + "/data/"
# Writes all the original interviews that have keywords into a subdirectory.
def write_subcorpora(subcorpora_dirname, filenames, content, keyword_freq_files):
os.mkdir(subcorpora_dirname)
for i in range(len(filenames)):
file = filenames[i]
if file not in keyword_freq_files: continue
new_file = "{}/{}".format(subcorpora_dirname, file)
with open(new_file, "w", encoding = "utf-8") as f:
f.write(content[i])
# Fills in decade years
def fill_years(data, step):
all_years = []
not_given = data["Not given"] if "Not given" in data else 0
for k in data.keys():
if k != "Not given": all_years.append(int(k))
new_data = defaultdict(lambda:0)
new_data["Not given"] = not_given
all_years.sort()
for i in range(all_years[0], all_years[-1] + step, step):
if str(i) in data:
new_data[i] = data[str(i)]
elif i in data:
new_data[i] = data[i]
else:
new_data[i] = 0
return new_data
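# Illustrative example (inputs are hypothetical): with step=10,
# fill_years({"1930": 2, "1950": 1, "Not given": 4}, 10) yields a defaultdict holding
# {"Not given": 4, 1930: 2, 1940: 0, 1950: 1}, i.e. missing decades are filled with zero counts.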
# Prints out a JSON string that is then read by the Node.js backend.
def print_message(_type, content):
message = {
"type": _type,
"content": content
}
print(json.dumps(message))
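# For example, print_message("progress", 42) writes {"type": "progress", "content": 42}
# to stdout, where the Node.js backend parses it.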
# Downloads the NLTK libraries.
def download_nltk():
print_message("progress-message", "Downloading relevant libraries...")
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
nltk.download('punkt')
print_message("progress", 2)
# Reads in arguments into the directories, words, and metadata file needed for the runs.
def read_arguments():
print_message("progress_message", "Reading in run data...")
data = json.loads(sys.argv[1])
runId = data['id']
runName = data['name']
runDate = data['date']
collections = data['collections']
keywords = data['keywordList']
metadata_file_interviews = data['interviews']
metadata_file_interviewees= data['interviewees']
print_message("progress", 4)
return runId, runName, runDate, collections, keywords, metadata_file_interviews, metadata_file_interviewees
# Creates a new folder to store the final data for the current run.
def create_run_directory(runId):
print_message("progress-message", "Creating a directory to store run results...")
dirname = data_dirname + "runs/" + runId
os.mkdir(dirname)
print_message("progress", 5)
return dirname
# Gets punctuation joined by bars (this is punctuation that we decide to count as separation!)
def get_punctuation_for_regex(punc):
return "|".join(punc)
# Converts the keyword list to Python regex form. Returns the full list of words and the
# included and excluded regexes.
def convert_keywords(keywords):
converted_keywords = []
for k in keywords:
# Sorts the included words backwards to make sure we get the longer words first
included_words = k["include"]
included_words = sorted(included_words, key=lambda l: (len(l), l), reverse=True)
punc = get_punctuation_for_regex(punctuation)
included_regexes = []
for w in included_words:
r = r'(?:{})({})(?:{})'.format(punc, w.replace("*", "[a-zA-Z]*"), punc)
included_regexes.append(r)
excluded_words = k["exclude"]
excluded_regexes = []
for w in excluded_words:
r = r"\b{}\b".format(w.replace("*", "[a-zA-Z]*"))
excluded_regexes.append(r)
k["included_regexes"] = included_regexes
k["include"] = included_words
k["excluded_regexes"] = excluded_regexes
converted_keywords.append(k)
return converted_keywords
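# Illustrative sketch (the keyword entry below is hypothetical): an entry such as
# {"include": ["school*"], "exclude": ["preschool"]} gains an included regex of the form
# (?:<punctuation>)(school[a-zA-Z]*)(?:<punctuation>) and an excluded regex \bpreschool\b,
# while "include" is re-sorted longest-first so longer variants match before shorter ones.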
# Reads all the text from each text file in the corpus directory. TODO: Resolve utf-8.
def read_corpuses(collections):
new_collections = []
for c in collections:
directory = data_dirname + "corpus-files/" + c["id"]
filenames = []
content = []
for file in os.listdir(directory):
if ".txt" not in file: continue
filenames.append(file)
# "ISO-8859-1" encoding otherwise?
with open("{}/{}".format(directory, file), "r", encoding = "utf-8", errors = "ignore") as f:
content.append(f.read())
c["filenames"] = filenames
c["content"] = content
new_collections.append(c)
return new_collections
# Gets the files for inclusion--excludes any files that are only male interviewees or
# interviews with no transcripts.
def get_included_files(collections, df1, df2, runJSON):
files_for_inclusion = {} # Final list of files for inclusion
# Statistics about file inclusion/exclusion
num_files_no_transcript = {} # Total number of files in collection with no transcript
people = {} # Information about individual people (only "Sex" == "Female" and "Sex" == "Unknown")
male_interviews = {} # Interviews that include males
male_plus_interviews = {} # Interviews with both male and non-male interviews
interview_years = {}
interview_years_by_file = {}
total_interviews = 0
#making a dictionary for the interviewees from id to information
interviewee_id_to_metadata= defaultdict(lambda:[])
for i,r in df2.iterrows():
interviewee_id_to_metadata[r["interviewee_id"]]=r
# Needed information across all collections
interview_years_all_collections = defaultdict(lambda:0)
interviewee_metadata_all_collections = defaultdict(lambda:defaultdict(lambda:0))
# Statistics about interviewees --> interviews
interviews_to_interviewees = defaultdict(lambda:[])
filenames_map = {}
for c in collections:
curr_id = c["id"]
files_for_inclusion[curr_id] = {}
num_files_no_transcript[curr_id] = 0
people[curr_id] = {}
male_interviews[curr_id] = {}
male_plus_interviews[curr_id] = {}
interview_years[curr_id] = defaultdict(lambda:0)
interview_years_by_file = defaultdict(lambda:{})
for f in c["filenames"]:
filenames_map[f] = curr_id
for i, r in df1.iterrows():
f = r["project_file_name"]
# Skips files with no project filename (shouldn't happen)
if pd.isnull(f): continue
"""
kkpy.io
========================
Functions to read and write files
.. currentmodule:: io
.. autosummary::
kkpy.io.read_aws
kkpy.io.read_2dvd_rho
kkpy.io.read_mxpol_rhi_with_hc
kkpy.io.read_dem
"""
import numpy as np
import pandas as pd
import datetime
import glob
import os
import sys
def read_aws(time, date_range=True, datadir='/disk/STORAGE/OBS/AWS/', stnid=None, dask=True):
"""
Read AWS_MIN files into dataframe.
Examples
---------
>>> import datetime
>>> df_aws = kkpy.io.read_aws(time=datetime.datetime(2018,2,28,6,0))
>>> df_aws = kkpy.io.read_aws(time=[datetime.datetime(2018,2,28,6,0),datetime.datetime(2018,3,1,12,0)], datadir='/path/to/aws/files/')
Parameters
----------
time : datetime or array_like of datetime
Datetime of the data you want to read.
If this is array of two elements, it will read all data within two datetimes by default.
If this is array of elements and keyword *date_range* is False, it will read the data of specific time of each element.
date_range : bool, optional
False if argument *time* contains element of specific time you want to read.
datadir : str, optional
Directory of data.
stnid : list, optional
List of station id you want to read. Read all site if None.
dask : boolean, optional
Return a dask dataframe if True, otherwise return a pandas dataframe.
Returns
---------
df_aws : dataframe
Return dataframe of aws data.
"""
import dask.dataframe as dd
if time is None:
sys.exit(f'{__name__}: Check time argument')
if len(time) == 1:
date_range = False
if date_range:
if len(time) != 2:
sys.exit(f'{__name__}: Check time and date_range arguments')
if time[0] >= time[1]:
sys.exit(f'{__name__}: time[1] must be greater than time[0]')
dt_start = datetime.datetime(time[0].year, time[0].month, time[0].day, time[0].hour, time[0].minute)
dt_finis = datetime.datetime(time[1].year, time[1].month, time[1].day, time[1].hour, time[1].minute)
# Get file list
filearr = np.array([])
_dt = dt_start
while _dt <= dt_finis:
_filearr = np.sort(glob.glob(f'{datadir}/{_dt:%Y%m}/{_dt:%d}/AWS_MIN_{_dt:%Y%m%d%H%M}'))
filearr = np.append(filearr, _filearr)
_dt = _dt + datetime.timedelta(minutes=1)
yyyy_filearr = [np.int(os.path.basename(x)[-12:-8]) for x in filearr]
mm_filearr = [np.int(os.path.basename(x)[-8:-6]) for x in filearr]
dd_filearr = [np.int(os.path.basename(x)[-6:-4]) for x in filearr]
hh_filearr = [np.int(os.path.basename(x)[-4:-2]) for x in filearr]
ii_filearr = [np.int(os.path.basename(x)[-2:]) for x in filearr]
dt_filearr = np.array([datetime.datetime(yyyy,mm,dd,hh,ii) for (yyyy,mm,dd,hh,ii) in zip(yyyy_filearr, mm_filearr, dd_filearr, hh_filearr, ii_filearr)])
filearr = filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
dt_filearr = dt_filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
else:
list_dt_yyyymmddhhii = np.unique(np.array([datetime.datetime(_time.year, _time.month, _time.day, _time.hour, _time.minute) for _time in time]))
filearr = np.array([f'{datadir}/{_dt:%Y%m}/{_dt:%d}/AWS_MIN_{_dt:%Y%m%d%H%M}' for _dt in list_dt_yyyymmddhhii])
dt_filearr = list_dt_yyyymmddhhii
if len(filearr) == 0:
sys.exit(f'{__name__}: No matched data for the given time period')
df_list = []
names = ['ID', 'YMDHI', 'LON', 'LAT', 'HGT',
'WD', 'WS', 'T', 'RH',
'PA', 'PS', 'RE',
'R60mAcc', 'R1d', 'R15m', 'R60m',
'WDS', 'WSS', 'dummy']
df_aws = dd.read_csv(filearr.tolist(), delimiter='#', names=names, header=None, na_values=[-999,-997])
df_aws = df_aws.drop('dummy', axis=1)
df_aws.WD = df_aws.WD/10.
df_aws.WS = df_aws.WS/10.
df_aws['T'] = df_aws['T']/10.
df_aws.RH = df_aws.RH/10.
df_aws.PA = df_aws.PA/10.
df_aws.PS = df_aws.PS/10.
df_aws.RE = df_aws.RE/10.
df_aws.R60mAcc = df_aws.R60mAcc/10.
df_aws.R1d = df_aws.R1d/10.
df_aws.R15m = df_aws.R15m/10.
df_aws.R60m = df_aws.R60m/10.
df_aws.WDS = df_aws.WDS/10.
df_aws.WSS = df_aws.WSS/10.
if stnid:
df_aws = df_aws[df_aws['ID'].isin(stnid)]
df_aws = df_aws.set_index(dd.to_datetime(df_aws['YMDHI'], format='%Y%m%d%H%M'))
df_aws = df_aws.drop('YMDHI', axis=1)
if dask:
return df_aws
else:
return df_aws.compute()
def read_2dvd_rho(time, date_range=True, datadir='/disk/common/kwonil_rainy/RHO_2DVD/', filename='2DVD_Dapp_v_rho_201*Deq.txt'):
"""
Read 2DVD density files into dataframe.
Examples
---------
>>> import datetime
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=datetime.datetime(2018,2,28)) # automatically date_range=False
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=[datetime.datetime(2018,2,28,6),datetime.datetime(2018,3,1,12)], datadir='/path/to/2dvd/files/')
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=list_of_many_datetimes, date_range=False)
>>> df_2dvd_drop = kkpy.io.read_2dvd_rho(time=datetime.datetime(2018,2,28), filename='2DVD_rho_test_*.txt')
Parameters
----------
time : datetime or array_like of datetime
Datetime of the data you want to read.
If this is array of two elements, it will read all data within two datetimes by default.
If this is array of elements and keyword *date_range* is False, it will read the data of specific time of each element.
date_range : bool, optional
False if argument *time* contains element of specific time you want to read.
datadir : str, optional
Directory of data.
filename : str, optional
File naming of data.
Returns
---------
df_2dvd_drop : dataframe
Return dataframe of 2dvd data.
"""
# Get file list
filearr = np.array(np.sort(glob.glob(f'{datadir}/**/{filename}', recursive=True)))
yyyy_filearr = [np.int(os.path.basename(x)[-27:-23]) for x in filearr]
mm_filearr = [np.int(os.path.basename(x)[-23:-21]) for x in filearr]
dd_filearr = [np.int(os.path.basename(x)[-21:-19]) for x in filearr]
dt_filearr = np.array([datetime.datetime(yyyy,mm,dd) for (yyyy, mm, dd) in zip(yyyy_filearr, mm_filearr, dd_filearr)])
if time is None:
sys.exit(f'{__name__}: Check time argument')
if len(time) == 1:
date_range = False
if date_range:
if len(time) != 2:
sys.exit(f'{__name__}: Check time and date_range arguments')
if time[0] >= time[1]:
sys.exit(f'{__name__}: time[1] must be greater than time[0]')
dt_start = datetime.datetime(time[0].year, time[0].month, time[0].day)
dt_finis = datetime.datetime(time[1].year, time[1].month, time[1].day)
filearr = filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
dt_filearr = dt_filearr[(dt_filearr >= dt_start) & (dt_filearr <= dt_finis)]
else:
list_dt_yyyymmdd = np.unique(np.array([datetime.datetime(_time.year, _time.month, _time.day) for _time in time]))
filearr = filearr[np.isin(dt_filearr, list_dt_yyyymmdd)]
dt_filearr = dt_filearr[np.isin(dt_filearr, list_dt_yyyymmdd)]
if len(filearr) == 0:
sys.exit(f'{__name__}: No matched data for the given time period')
# # READ DATA
columns = ['hhmm', 'Dapp', 'VEL', 'RHO', 'AREA', 'WA', 'HA', 'WB', 'HB', 'Deq']
dflist = []
for i_file, (file, dt) in enumerate(zip(filearr, dt_filearr)):
_df = pd.read_csv(file, skiprows=1, names=columns, header=None, delim_whitespace=True)
_df['year'] = dt.year
_df['month'] = dt.month
_df['day'] = dt.day
_df['hour'] = np.int_(_df['hhmm'] / 100)
_df['minute'] = _df['hhmm'] % 100
_df['jultime'] = pd.to_datetime(_df[['year','month','day','hour','minute']])
"""
Utilities for dealing with PCTS cases.
"""
import dataclasses
import re
import typing
import pandas
GENERAL_PCTS_RE = re.compile("([A-Z]+)-([0-9X]{4})-([0-9]+)((?:-[A-Z0-9]+)*)$")
MISSING_YEAR_RE = re.compile("([A-Z]+)-([0-9]+)((?:-[A-Z0-9]+)*)$")
VALID_PCTS_PREFIX = {
"AA",
"ADM",
"APCC",
"APCE",
"APCH",
"APCNV",
"APCS",
"APCSV",
"APCW",
"CHC",
"CPC",
"DIR",
"ENV",
"HPO",
"PAR",
"PS",
"TT",
"VTT",
"ZA",
}
VALID_PCTS_SUFFIX = {
"1A",
"2A",
"AC",
"ACI",
"ADD1",
"ADU",
"AIC",
"BL",
"BSA",
"CA",
"CASP",
"CATEX",
"CC",
"CC1",
"CC3",
"CCMP",
"CDO",
"CDP",
"CE",
"CEX",
"CLQ",
"CM",
"CN",
"COA",
"COC",
"CPIO",
"CPIOA",
"CPIOC",
"CPIOE",
"CPU",
"CR",
"CRA",
"CU",
"CUB",
"CUC",
"CUE",
"CUW",
"CUX",
"CUZ",
"CWC",
"CWNC",
"DA",
"DB",
"DD",
"DEM",
"DI",
"DPS",
"DRB",
"EAF",
"EIR",
"ELD",
"EXT",
"EXT2",
"EXT3",
"EXT4",
"F",
"GB",
"GPA",
"GPAJ",
"HCA",
"HCM",
"HD",
"HPOZ",
"ICO",
"INT",
"M1",
"M2",
"M3",
"M6",
"M7",
"M8",
"M9",
"M10",
"M11",
"MA",
"MAEX",
"MCUP",
"MEL",
"MND",
"MPA",
"MPC",
"MPR",
"MSC",
"MSP",
"NC",
"ND",
"NR",
"O",
"OVR",
"P",
"PA",
"PA1",
"PA2",
"PA3",
"PA4",
"PA5",
"PA6",
"PA7",
"PA9",
"PA10",
"PA15",
"PA16",
"PA17",
"PAB",
"PAD",
"PMEX",
"PMLA",
"PMW",
"POD",
"PP",
"PPR",
"PPSP",
"PSH",
"PUB",
"QC",
"RAO",
"RDP",
"RDPA",
"REC1",
"REC2",
"REC3",
"REC4",
"REC5",
"REV",
"RFA",
"RV",
"SCEA",
"SCPE",
"SE",
"SIP",
"SL",
"SLD",
"SM",
"SN",
"SP",
"SPE",
"SPP",
"SPPA",
"SPPM",
"SPR",
"SUD",
"SUP1",
"TC",
"TDR",
"TOC",
"UAIZ",
"UDU",
"VCU",
"VSO",
"VZC",
"VZCJ",
"WDI",
"WTM",
"YV",
"ZAA",
"ZAD",
"ZAI",
"ZBA",
"ZC",
"ZCJ",
"ZV",
}
@dataclasses.dataclass
class PCTSCaseNumber:
"""
A dataclass for parsing and storing PCTS Case Number info.
The information is accessible as data attributes on the class instance.
If the constructor is unable to parse the pcts_case_string,
a ValueError will be raised.
References
==========
https://planning.lacity.org/resources/prefix-suffix-report
"""
prefix: typing.Optional[str] = None
year: typing.Optional[int] = None
case: typing.Optional[int] = None
suffix: typing.Optional[typing.List[str]] = None
def __init__(self, pcts_case_string: str):
try:
self._general_pcts_parser(pcts_case_string)
except ValueError:
try:
self._next_pcts_parser(pcts_case_string)
except ValueError:
pass
def _general_pcts_parser(self, pcts_case_string: str):
"""
Create a new PCTSCaseNumber instance.
Parameters
==========
pcts_case_string: str
The PCTS case number string to be parsed.
"""
matches = GENERAL_PCTS_RE.match(pcts_case_string.strip())
if matches is None:
raise ValueError("Couldn't parse PCTS string")
groups = matches.groups()
self.prefix = groups[0]
self.year = int(groups[1])
self.case = int(groups[2])
# Suffix
if groups[3]:
self.suffix = groups[3].strip("-").split("-")
def _next_pcts_parser(self, pcts_case_string: str):
# Match where there is no year, but there is prefix, case ID, and suffix
matches = MISSING_YEAR_RE.match(pcts_case_string.strip())
if matches is None:
raise ValueError(f"Coudln't parse PCTS string {pcts_case_string}")
groups = matches.groups()
# Prefix
self.prefix = groups[0]
self.case = int(groups[1])
# Suffix
if groups[2]:
self.suffix = groups[2].strip("-").split("-")
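# Usage sketch (the case number below is made up for illustration):
# >>> case = PCTSCaseNumber("DIR-2019-123-TOC-SPR")
# >>> case.prefix, case.year, case.case, case.suffix
# ('DIR', 2019, 123, ['TOC', 'SPR'])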
# Subset PCTS given a start date and a list of prefixes or suffixes
def subset_pcts(
pcts,
start_date=None,
end_date=None,
prefix_list=None,
suffix_list=None,
get_dummies=False,
verbose=False,
):
"""
Download and subset a PCTS extract for analysis. This is intended to
be the primary entry point for loading PCTS data.
Parameters
==========
pcts: pandas.DataFrame
A PCTS extract of the shape returned by subset_pcts.
start_date: time-like
Optional start date cutoff.
end_date: time-like
Optional end-date cutoff
prefix_list: iterable of strings
A list of prefixes to use. If not given, all prefixes are returned.
suffix_list: iterable of strings
A list of suffixes to use. If not given, all suffixes are used.
get_dummies: bool
Whether to get dummy indicator columns for all prefixes and suffixes.
verbose: bool
Whether to output information about subsetting as it happens.
"""
# Subset PCTS by start / end date
start_date = (
pandas.to_datetime(start_date)
if start_date
else pandas.to_datetime("2010-01-01")
)
end_date = pandas.to_datetime(end_date)
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
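# Usage sketch (illustrative; header_df, episodes_df and oc3_df are hypothetical DataFrames
# shaped like the tables referenced above): each validate_* factory returns an
# (ErrorDefinition, callable) pair, and the callable maps a dict of tables plus 'metadata'
# to the offending row indices per table, e.g.
# >>> error, check = validate_165()
# >>> check({'Header': header_df, 'Episodes': episodes_df, 'OC3': oc3_df,
# ...        'metadata': {'collection_start': '01/04/2019', 'collection_end': '31/03/2020'}})
# {'Header': [...], 'OC3': [...]}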
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
# Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = ‘A1’, ‘A4’ or ‘A6’ and <REC> = ‘E1’, ‘E11’, ‘E12’ <FOSTER_CARE> should not be ‘1’.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
        description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be <= 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # & binds more tightly than ==, so the brackets around each comparison are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
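            # After sorting by CHILD and DECOM, shift(1) aligns each row with the row above,
            # so PREVIOUS_DEC/PREVIOUS_CHILD refer to the child's previous episode.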
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
            # raise an error if either LA_PERM or DATE_PERM is present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
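            # keep only each child's first episode of the current year and last episode of the previous year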
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED', # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
    This error checks that the first adoption episode is after the last decision.
    If there are multiple of either, there may be unexpected results.
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
            # flag rows where the placement (DECOM) began before the date of the decision (DATE_PLACED)
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
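            # An episode is flagged if it has no matching PlacedAdoption record at all
            # ('left_only') or if the matched record's cessation fields are incomplete.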
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
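            # keep only each child's first episode of the current year and last episode of the previous year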
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + | pd.DateOffset(years=18) | pandas.DateOffset |
# Make a stackplot and a stackplot where total = 100% of agegroups
# <NAME> (@rcsmit) - MIT Licence
# IN: https://data.rivm.nl/covid-19/COVID-19_ziekenhuis_ic_opnames_per_leeftijdsgroep.csv
# OUT : Stackplots
#
# TODO : Legend DONE
# Nice colors DONE
# Restrictions ??
# Set a date-period DONE
# Make everything a function call
# Integration in the dashboard
#
# Inspired by a graph by @chivotweets
# https://twitter.com/rubenivangaalen/status/1374443261704605697
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import datetime
import datetime as dt
from datetime import datetime, timedelta
import prepare_casuslandelijk
def save_df(df, name):
""" _ _ _ """
OUTPUT_DIR = (
"C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\output\\"
)
name_ = OUTPUT_DIR + name + ".csv"
compression_opts = dict(method=None, archive_name=name_)
df.to_csv(name_, index=False, compression=compression_opts)
print("--- Saving " + name_ + " ---")
def smooth(df, columnlist):
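    """Apply a centred rolling mean (window=1, effectively a pass-through) to each column and collect the columns, names and age labels."""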
columnlist_sma_df = []
columnlist_df= []
columnlist_names= []
columnlist_ages = []
# 0-9 10-19 20-29 30-39 40-49 50-59 60-69 70-79 80+
#pop_ = [1756000, 1980000, 2245000, 2176000, 2164000, 2548000, 2141000, 1615000, 839000]
#fraction = [0.10055, 0.11338, 0.12855, 0.12460, 0.12391, 0.14590, 0.12260, 0.09248, 0.04804]
for c in columnlist:
#new_column = c + "_SMA"
new_column = c
# print("Generating " + new_column + "...")
df[new_column] = (
df.iloc[:, df.columns.get_loc(c)].rolling(window=1, center=True).mean()
)
columnlist_sma_df.append(df[new_column])
columnlist_df.append(df[c])
columnlist_names.append(new_column)
        columnlist_ages.append(c) # only the ages, for the legend
return df,columnlist_df, columnlist_sma_df,columnlist_names,columnlist_ages
def hundred_stack_area(df, column_list):
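    """Rescale the given columns so that each row sums to 100%, returning the new column names and data (for a 100% stacked area plot)."""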
l = len(df)
df["rowtotal"] = np.nan
columnlist_names = []
dfcolumnlist = []
columnlist_ages = []
for c in column_list:
new_column = str(c) + "_hstack"
columnlist_ages.append(c)
df[new_column] = np.nan
columnlist_names.append(new_column)
for r in range(df.first_valid_index(),(df.first_valid_index()+l)):
row_total = 0
for c in column_list:
# print (r)
# print (df)
# print (df.loc[r ,c]
row_total += df.loc[r ,c]
df.loc[r, "rowtotal"] = row_total
for c in column_list:
new_column = str(c) + "_hstack"
for r in range(df.first_valid_index(),(df.first_valid_index()+l)):
df.loc[r, new_column] = round((100 * df.loc[r, c] / df.loc[r, "rowtotal"]),2)
dfcolumnlist.append(df[new_column])
df = df.drop(columns=["rowtotal"], axis=1)
return df, columnlist_names, dfcolumnlist,columnlist_ages
def drop_columns(df, what_to_drop):
    """ drop columns. what_to_drop : a column name or a list of column names """
    if what_to_drop is not None:
        if not isinstance(what_to_drop, list):
            what_to_drop = [what_to_drop]
        print("dropping " + str(what_to_drop))
        for d in what_to_drop:
            df = df.drop(columns=[d], axis=1)
    return df
def convert(list):
return tuple(list)
def make_age_graph(df, d, titel):
# df = agg_ages(df)
fig, ax = plt.subplots()
for d_ in d:
plt.plot(df["Date_of_statistics_week_start"], df[d_], label = d_)
plt.legend()
titel_ = titel + " (weekcijfers)"
plt.title(titel_)
plt.tight_layout()
plt.show()
def make_graph(df, columns_df,columnlist_names, columnlist_ages, datumveld, titel):
#datumlijst = df[datumveld].tolist()
#df = df[:-1] # drop last row since this one is incomplete
datumlijst = df[datumveld].tolist()
color_list = [ "#ff6666", # reddish 0
"#ac80a0", # purple 1
"#3fa34d", # green 2
"#EAD94C", # yellow 3
"#EFA00B", # orange 4
"#7b2d26", # red 5
"#3e5c76", # blue 6
"#e49273" , # dark salmon 7
"#1D2D44", # 8
"#02A6A8",
"#4E9148",
"#F05225",
"#024754",
"#FBAA27",
"#302823",
"#F07826",
]
fig, ax = plt.subplots()
sp = ax.stackplot(datumlijst, columns_df, colors=color_list)
#ax.legend(loc="upper left")
plt.title(titel)
proxy = [mpl.patches.Rectangle((0,0), 0,0, facecolor=pol.get_facecolor()[0]) for pol in sp]
ax.legend(proxy, tuple (columnlist_ages), bbox_to_anchor=(1.3, 1),loc="best")
plt.tight_layout()
plt.show()
def show(df, c1,titel):
datumveld = "Date_of_statistics_week_start"
df, columnlist_df, columnlist_sma_df, columnlist_names, columnlist_ages = smooth(df, c1)
titel = titel + " (weekcijfers)"
make_graph (df, columnlist_df, columnlist_sma_df, columnlist_names, datumveld, titel)
df, columnlist_hdred_names, columnlist_hdred_df, columnlist_ages = hundred_stack_area(df, columnlist_names)
make_graph (df, columnlist_hdred_df,columnlist_names, columnlist_ages , datumveld, titel)
def agg_ages(df):
# make age groups
df["0-49"] = df["0-14"] + df["15-19"] + df["20-24"] + df["25-29"] + df["30-34"] + df["35-39"] + df["40-44"] + df["45-49"]
df["50-79"] = df["50-54"] + df["55-59"] + df["60-64"] + df["65-69"] + df["70-74"] + df["75-79"]
df["80+"] = df["80-84"] + df["85-89"] + df["90+"]
return df
def prepare_data():
show_from = "2020-1-1"
show_until = "2030-1-1"
url1 = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\COVID-19_ziekenhuis_ic_opnames_per_leeftijdsgroep.csv"
# url1 = "https://data.rivm.nl/covid-19/COVID-19_ziekenhuis_ic_opnames_per_leeftijdsgroep.csv"
df = pd.read_csv(url1, delimiter=";", low_memory=False)
datumveld = "Date_of_statistics_week_start"
df[datumveld] = pd.to_datetime(df[datumveld], format="%Y-%m-%d")
df = df.reset_index()
df.fillna(value=0, inplace=True)
startdate = | pd.to_datetime(show_from) | pandas.to_datetime |
import numpy as np
from scipy.spatial import distance_matrix, distance
from visualizations.iVisualization import VisualizationInterface
from controls.controllers import TimeSeriesController
import panel as pn
import holoviews as hv
from holoviews.streams import Pipe, Buffer
import pandas as pd
import time
from threading import Thread
from tkinter import *
class TimeSeries(VisualizationInterface):
def __init__(self, main):
self._main = main
self._dfstream = Buffer(pd.DataFrame({'time': [], 'neurons': [], 'color': []}, columns=['time', 'neurons', 'color']), length=1, index=False)#
self._avarage_points = Buffer(pd.DataFrame({'time': [], 'neurons': []}, columns=['time', 'neurons']), length=1, index=False)#
self._controls = TimeSeriesController(self._calculate, self._clear, self._dfstream, self._avarage_points, name = "Time Series")
self._pipe_points = Pipe(data=[])
self.stop = False
self.line = []
def thread(func):
def wrapper(*args, **kwargs):
current_thread = Thread(target=func, args=args, kwargs=kwargs)
current_thread.start()
return wrapper
def _activate_controllers(self, ):
self._main._controls.append(pn.Column(self._controls))
cmin, cmax = self._main._pipe.data.min(), self._main._pipe.data.max()
Points = hv.DynamicMap(hv.Points, streams=[self._dfstream]).apply.opts(color='color', cmap=self._main._maincontrol.param.colormap, clim=(cmin, cmax))
Curve = hv.DynamicMap(hv.Curve, streams=[self._avarage_points]).opts(color='red')
self._main._timeseries.append((Points*Curve).opts(width=950, height=350, ylim=(-1, self._main._m*self._main._n)))
self._main._pdmap[0] = pn.Column(self._main._Image * hv.DynamicMap(hv.Points, streams=[self._pipe_points]).opts(color='Black', marker='*', size=30))
def _deactivate_controllers(self,):
self._main._timeseries.clear()
self._main._pdmap[0] = pn.Column(self._main._Image * self._main._Paths)
@thread
def _calculate(self, ):
bmu = np.apply_along_axis(lambda x: np.argmin( np.linalg.norm(self._main._weights - x.reshape((1, self._main._dim)), axis=1)), 1, self._main._idata)
matrix = self._main._pipe.data.reshape(-1,1)
for i, u in enumerate(bmu):
self._pipe_points.send(self._main._get_neuron_xy(u))
self._dfstream.send(pd.DataFrame(np.vstack([i, u, matrix[u]]).T, columns=['time', 'neurons', 'color']))
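            # Exponentially weighted average of the BMU index: with beta = betta/100,
            # ewa_t = beta * ewa_{t-1} + (1 - beta) * u_t, seeded with the first BMU.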
ewa = 0.01*self._controls.betta*self._avarage_points.data.neurons.iloc[-1] + (1-0.01*self._controls.betta)*u if self._avarage_points.data.neurons.size>0 else u #Exponentially Weighted Averages
self._avarage_points.send(pd.DataFrame([(i, ewa)], columns=['time', 'neurons']))
time.sleep(self._controls.speed)
if self.stop:
self.stop = False
break
def _clear(self,):
self.stop = True
self._dfstream.clear()
self._avarage_points.clear()
self._pipe_points.send([])
def _add_data(self, unit = None):
u = np.clip(unit, 0, self._main._m * self._main._n - 1)
i = self._dfstream.data.size + 1
self._pipe_points.send(self._main._get_neuron_xy(u))
self._dfstream.send( | pd.DataFrame([(i, u)], columns=['time', 'neurons']) | pandas.DataFrame |
'''
Authors: <NAME> (@anabab1999) and <NAME> (@felipezara2013)
'''
from calendars import DayCounts
import pandas as pd
from pandas.tseries.offsets import DateOffset
from bloomberg import BBG
import numpy as np
bbg = BBG()
# Pull the tickers for the zero curve
tickers_zero_curve = ['S0023Z 1Y BLC2 Curncy',
'S0023Z 1D BLC2 Curncy',
'S0023Z 3M BLC2 Curncy',
'S0023Z 1W BLC2 Curncy',
'S0023Z 10Y BLC2 Curncy',
'S0023Z 1M BLC2 Curncy',
'S0023Z 2Y BLC2 Curncy',
'S0023Z 6M BLC2 Curncy',
'S0023Z 2M BLC2 Curncy',
'S0023Z 5Y BLC2 Curncy',
'S0023Z 4M BLC2 Curncy',
'S0023Z 2D BLC2 Curncy',
'S0023Z 9M BLC2 Curncy',
'S0023Z 3Y BLC2 Curncy',
'S0023Z 4Y BLC2 Curncy',
'S0023Z 50Y BLC2 Curncy',
'S0023Z 12Y BLC2 Curncy',
'S0023Z 18M BLC2 Curncy',
'S0023Z 7Y BLC2 Curncy',
'S0023Z 5M BLC2 Curncy',
'S0023Z 6Y BLC2 Curncy',
'S0023Z 2W BLC2 Curncy',
'S0023Z 11M BLC2 Curncy',
'S0023Z 15M BLC2 Curncy',
'S0023Z 21M BLC2 Curncy',
'S0023Z 15Y BLC2 Curncy',
'S0023Z 25Y BLC2 Curncy',
'S0023Z 8Y BLC2 Curncy',
'S0023Z 10M BLC2 Curncy',
'S0023Z 20Y BLC2 Curncy',
'S0023Z 33M BLC2 Curncy',
'S0023Z 7M BLC2 Curncy',
'S0023Z 8M BLC2 Curncy',
'S0023Z 11Y BLC2 Curncy',
'S0023Z 14Y BLC2 Curncy',
'S0023Z 18Y BLC2 Curncy',
'S0023Z 19Y BLC2 Curncy',
'S0023Z 23D BLC2 Curncy',
'S0023Z 9Y BLC2 Curncy',
'S0023Z 17M BLC2 Curncy',
'S0023Z 1I BLC2 Curncy',
'S0023Z 22Y BLC2 Curncy',
'S0023Z 28Y BLC2 Curncy',
'S0023Z 2I BLC2 Curncy',
'S0023Z 30Y BLC2 Curncy',
'S0023Z 31Y BLC2 Curncy',
'S0023Z 32Y BLC2 Curncy',
'S0023Z 38Y BLC2 Curncy',
'S0023Z 39Y BLC2 Curncy',
'S0023Z 40Y BLC2 Curncy',
'S0023Z 42D BLC2 Curncy',
'S0023Z 48Y BLC2 Curncy']
df_bbg = bbg.fetch_series(tickers_zero_curve, "PX_LAST",
startdate = pd.to_datetime('today'),
enddate = pd.to_datetime('today'))
df_bbg = df_bbg.transpose()
df_bbg_m = bbg.fetch_contract_parameter(tickers_zero_curve, "MATURITY")
'''
The zero curve built below is used for interpolation, to find the rate for a specific term.
'''
# build the zero curve
zero_curve = pd.concat([df_bbg, df_bbg_m], axis=1, sort= True).set_index('MATURITY').sort_index()
zero_curve = zero_curve.astype(float)
zero_curve = zero_curve.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='backward', limit_area=None, downcast=None)
zero_curve.index = pd.to_datetime(zero_curve.index)
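# Illustrative sketch (not called anywhere): one way to read an interpolated rate off the
# zero curve above for an arbitrary date. The function name and the 'time' interpolation
# method are assumptions, not part of the workflow below.
def zero_rate_for_date(curve, when):
    """Return the time-interpolated zero rate(s) for a date inside the curve's range."""
    when = pd.to_datetime(when)
    extended = curve.reindex(curve.index.union([when])).interpolate(method='time')
    return extended.loc[when]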
# function that calculates the fixed leg of the swap contract
'''
The function below calculates the present value of the swap's fixed leg
for a specific term, based on interpolation of the zero curve.
'''
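# Illustrative usage sketch (the day-count and calendar names are hypothetical placeholders;
# pass whatever the DayCounts class in `calendars` accepts in your setup):
#   pv = swap_fixed_leg_pv('2021-06-30', rate=0.05, busdays='ACT/360', calendartype='us_trading')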
def swap_fixed_leg_pv(today, rate, busdays, calendartype, maturity=10, periodcupons=6, notional=1000000):
global zero_curve
dc1 = DayCounts(busdays, calendar=calendartype)
today = pd.to_datetime(today)
date_range = pd.date_range(start=today, end=today + DateOffset(years=maturity), freq= | DateOffset(months=periodcupons) | pandas.tseries.offsets.DateOffset |
# This Python file uses the following encoding: utf-8
# <NAME> <<EMAIL>>, september 2020
import os
import pandas as pd
import numpy as np
from datetime import date
from qcodes.instrument.base import Instrument
class BlueFors(Instrument):
"""
This is the QCoDeS python driver to extract the temperature and pressure
from a BlueFors fridge
"""
def __init__(self, name : str,
folder_path : str,
channel_vacuum_can : int,
channel_pumping_line : int,
channel_compressor_outlet : int,
channel_compressor_inlet : int,
channel_mixture_tank : int,
channel_venting_line : int,
channel_50k_plate : int,
channel_4k_plate : int,
channel_still : int,
channel_mixing_chamber : int,
channel_magnet : int=None,
**kwargs) -> None:
"""
QCoDeS driver for BlueFors fridges.
! This driver get parameters from the fridge log files.
! It does not interact with the fridge electronics.
Args:
name: Name of the instrument.
folder_path: Valid path toward the BlueFors log folder.
channel_vacuum_can: channel of the vacuum can
channel_pumping_line: channel of the pumping line.
channel_compressor_outlet: channel of the compressor outlet.
channel_compressor_inlet: channel of the compressor inlet.
channel_mixture_tank: channel of the mixture tank.
channel_venting_line: channel of the venting line.
channel_50k_plate: channel of the 50k plate.
channel_4k_plate: channel of the 4k plate.
channel_still: channel of the still.
channel_mixing_chamber: channel of the mixing chamber.
channel_magnet: channel of the magnet.
"""
super().__init__(name = name, **kwargs)
self.folder_path = os.path.abspath(folder_path)
self.add_parameter(name = 'pressure_vacuum_can',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_vacuum_can),
docstring = 'Pressure of the vacuum can',
)
self.add_parameter(name = 'pressure_pumping_line',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_pumping_line),
docstring = 'Pressure of the pumping line',
)
self.add_parameter(name = 'pressure_compressor_outlet',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_compressor_outlet),
docstring = 'Pressure of the compressor outlet',
)
self.add_parameter(name = 'pressure_compressor_inlet',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_compressor_inlet),
docstring = 'Pressure of the compressor inlet',
)
self.add_parameter(name = 'pressure_mixture_tank',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_mixture_tank),
docstring = 'Pressure of the mixture tank',
)
self.add_parameter(name = 'pressure_venting_line',
unit = 'mBar',
get_parser = float,
get_cmd = lambda: self.get_pressure(channel_venting_line),
docstring = 'Pressure of the venting line',
)
self.add_parameter(name = 'temperature_50k_plate',
unit = 'K',
get_parser = float,
get_cmd = lambda: self.get_temperature(channel_50k_plate),
docstring = 'Temperature of the 50K plate',
)
self.add_parameter(name = 'temperature_4k_plate',
unit = 'K',
get_parser = float,
get_cmd = lambda: self.get_temperature(channel_4k_plate),
docstring = 'Temperature of the 4K plate',
)
if channel_magnet is not None:
self.add_parameter(name = 'temperature_magnet',
unit = 'K',
get_parser = float,
get_cmd = lambda: self.get_temperature(channel_magnet),
docstring = 'Temperature of the magnet',
)
self.add_parameter(name = 'temperature_still',
unit = 'K',
get_parser = float,
get_cmd = lambda: self.get_temperature(channel_still),
docstring = 'Temperature of the still',
)
self.add_parameter(name = 'temperature_mixing_chamber',
unit = 'K',
get_parser = float,
get_cmd = lambda: self.get_temperature(channel_mixing_chamber),
docstring = 'Temperature of the mixing chamber',
)
self.connect_message()
def get_temperature(self, channel: int) -> float:
"""
Return the last registered temperature of the current day for the
channel.
Args:
channel (int): Channel from which the temperature is extracted.
Returns:
temperature (float): Temperature of the channel in Kelvin.
"""
folder_name = date.today().strftime("%y-%m-%d")
file_path = os.path.join(self.folder_path, folder_name, 'CH'+str(channel)+' T '+folder_name+'.log')
try:
df = pd.read_csv(file_path,
delimiter = ',',
names = ['date', 'time', 'y'],
header = None)
# There is a space before the day
df.index = pd.to_datetime(df['date']+'-'+df['time'], format=' %d-%m-%y-%H:%M:%S')
return df.iloc[-1]['y']
except (PermissionError, OSError) as err:
self.log.warn('Cannot access log file: {}. Returning np.nan instead of the temperature value.'.format(err))
return np.nan
except IndexError as err:
self.log.warn('Cannot parse log file: {}. Returning np.nan instead of the temperature value.'.format(err))
return np.nan
def get_pressure(self, channel: int) -> float:
"""
Return the last registered pressure of the current day for the
channel.
Args:
channel (int): Channel from which the pressure is extracted.
Returns:
pressure (float): Pressure of the channel in mBar.
"""
folder_name = date.today().strftime("%y-%m-%d")
file_path = os.path.join(self.folder_path, folder_name, 'maxigauge '+folder_name+'.log')
try:
df = pd.read_csv(file_path,
delimiter=',',
names=['date', 'time',
'ch1_name', 'ch1_void1', 'ch1_status', 'ch1_pressure', 'ch1_void2', 'ch1_void3',
'ch2_name', 'ch2_void1', 'ch2_status', 'ch2_pressure', 'ch2_void2', 'ch2_void3',
'ch3_name', 'ch3_void1', 'ch3_status', 'ch3_pressure', 'ch3_void2', 'ch3_void3',
'ch4_name', 'ch4_void1', 'ch4_status', 'ch4_pressure', 'ch4_void2', 'ch4_void3',
'ch5_name', 'ch5_void1', 'ch5_status', 'ch5_pressure', 'ch5_void2', 'ch5_void3',
'ch6_name', 'ch6_void1', 'ch6_status', 'ch6_pressure', 'ch6_void2', 'ch6_void3',
'void'],
header=None)
df.index = | pd.to_datetime(df['date']+'-'+df['time'], format='%d-%m-%y-%H:%M:%S') | pandas.to_datetime |
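# --- Illustration (added for clarity; not part of the original driver) ---
# A self-contained sketch of the log-parsing idiom used in get_temperature():
# BlueFors CH<n> temperature log lines look roughly like
# " 21-09-20,14:05:12,0.0123" (note the leading space before the date, hence
# the ' %d-%m-%y-%H:%M:%S' format). The sample lines below are made up.
def _example_parse_temperature_log():
    import io
    sample = " 21-09-20,14:05:12,0.0123\n 21-09-20,14:06:12,0.0124\n"
    df = pd.read_csv(io.StringIO(sample), delimiter=',',
                     names=['date', 'time', 'y'], header=None)
    df.index = pd.to_datetime(df['date'] + '-' + df['time'],
                              format=' %d-%m-%y-%H:%M:%S')
    return df.iloc[-1]['y']  # latest reading, as the driver returns it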
#!/usr/bin/env python3
### Burak Less Data Experiment Utils
### GENERIC
import copy
import datetime
import io
import os
from os import listdir
from os.path import isfile, join, isdir
import sys
from functools import partial
### DATA PROCESS
import pandas as pd
import numpy as np
import ast
from sklearn.metrics import recall_score, classification_report, auc, roc_curve
import re
### PLOTTING & LOGS
import matplotlib.pyplot as plt
import seaborn as sns
import logging
from pylab import rcParams
rcParams['figure.figsize'] = 8, 6
### DATA STORING
import h5py
import pickle
import json
### RANDOM
import random
import time
#from numpy.random import seed
import multiprocessing
from multiprocessing import Pool
#print("CPU COUNT:", multiprocessing.cpu_count())
from fast_features import generate_features
from scipy.stats import ks_2samp
###PREDICT UTILS###
def plot_cm(labels, predictions, name):
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels, predictions)
plt.figure(figsize=(5,5))
sns.heatmap(cm, annot=True, fmt="d")
plt.title('{}'.format(name))
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.show()
def majority_filter_traditional(seq, width):
offset = width // 2
seq = [0] * offset + seq
result = []
for i in range(len(seq) - offset):
a = seq[i:i+width]
result.append(max(set(a), key=a.count))
return result
def consecutive_filter(seq,width):
result = []
for index in range(len(seq)):
tmp_set_list = list(set(seq[index:index+width]))
if len(tmp_set_list) == 1 and tmp_set_list[0] == seq[index]:
result.append(seq[index])
else:
result.append(0) #Assumes healthy label is 0
return result
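# --- Illustration (added for clarity; not part of the original utilities) ---
# A tiny worked example of the two label-smoothing filters above. With a
# window of 3, the isolated '1' spike is dropped and only the sustained run
# of '2' labels survives; both calls return [0, 0, 0, 0, 0, 2, 2, 2] here
# (0 is assumed to be the healthy label, as noted above).
def _example_label_filters():
    raw = [0, 0, 1, 0, 0, 2, 2, 2]
    smoothed_majority = majority_filter_traditional(raw, 3)
    smoothed_consecutive = consecutive_filter(raw, 3)
    return smoothed_majority, smoothed_consecutive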
def calculate_miss_rates(true_label,pred_label):
alarm_dict = {}
normal_true_idx = np.where(true_label==0)[0]
anom_true_idx = np.where(true_label!=0)[0]
#Find number of normal samples labeled as anomalous
fp_deploy = pred_label[normal_true_idx][pred_label[normal_true_idx] != 0]
false_alarm_rate = len(fp_deploy) / len(normal_true_idx)
logging.info("Total normal runs classified as anomaly: %s, Total normal runs %s ",str(len(fp_deploy)),str(len(normal_true_idx)))
logging.info(false_alarm_rate)
#Find number of anomalous samples labeled as normal
fn_deploy = pred_label[anom_true_idx][pred_label[anom_true_idx] == 0]
anom_miss_rate = len(fn_deploy) / len(anom_true_idx)
logging.info("Total anom runs classified as normal: %s, Total anom runs %s ",str(len(fn_deploy)),str(len(anom_true_idx)))
logging.info(anom_miss_rate)
alarm_dict['false_alarm_rate'] = false_alarm_rate
alarm_dict['anom_miss_rate'] = anom_miss_rate
return alarm_dict
def false_anom_rate_calc(true_label,pred_label,conf,cv_index,name,save):
"""
Calculates false alarm rate and anomaly miss rate
Assumes 0 is normal label and other labels are anomalies
Args:
true_label: Array composed of integer labels, e.g., [0,0,4,2]
pred_label: Array composed of integer labels, e.g., [0,0,4,2]
"""
# • False alarm rate: The percentage of the healthy windows that are identified as anomalous (any anomaly type).
# • Anomaly miss rate: The percentage of the anomalous windows that are identified as healthy
alarm_dict = {}
normal_true_idx = np.where(true_label==0)[0]
anom_true_idx = np.where(true_label!=0)[0]
#Find number of normal samples labeled as anomalous
fp_deploy = pred_label[normal_true_idx][pred_label[normal_true_idx] != 0]
false_alarm_rate = len(fp_deploy) / len(normal_true_idx)
logging.info("Total normal runs classified as anomaly: %s, Total normal runs %s ",str(len(fp_deploy)),str(len(normal_true_idx)))
logging.info(false_alarm_rate)
#Find number of anomalous samples labeled as normal
fn_deploy = pred_label[anom_true_idx][pred_label[anom_true_idx] == 0]
anom_miss_rate = len(fn_deploy) / len(anom_true_idx)
logging.info("Total anom runs classified as normal: %s, Total anom runs %s ",str(len(fn_deploy)),str(len(anom_true_idx)))
logging.info(anom_miss_rate)
alarm_dict['false_alarm_rate'] = false_alarm_rate
alarm_dict['anom_miss_rate'] = anom_miss_rate
if save:
json_dump = json.dumps(alarm_dict)
f_json = open(conf['results_dir'] / ("{}_alert_dict.json".format(name)),"w")
f_json.write(json_dump)
f_json.close()
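# --- Illustration (added for clarity; not part of the original utilities) ---
# A small worked example of the two rates defined above: out of 4 healthy
# windows (label 0) one is predicted anomalous -> false alarm rate 0.25; out
# of 2 anomalous windows one is predicted healthy -> anomaly miss rate 0.5.
def _example_alarm_rates():
    true_label = np.array([0, 0, 0, 0, 3, 3])
    pred_label = np.array([0, 2, 0, 0, 3, 0])
    return calculate_miss_rates(true_label, pred_label)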
def analysis_wrapper_multiclass(true_labels, pred_labels,conf,cv_index,name,name_cm='Deployment Data',save=True,plot=True):
"""
true_labels: it should be in the format of an array [0,2,1,3,...]
pred_labels: it should be in the format of an array [0,1,1,4,...]
"""
from sklearn.metrics import classification_report
logging.info("####################################")
logging.info("%s\n%s",name_cm,classification_report(y_true=true_labels, y_pred =pred_labels))
logging.info("#############")
deploy_report = classification_report(y_true=true_labels, y_pred =pred_labels,output_dict=True)
if save:
logging.info("Saving results")
cv_path = conf['results_dir']
json_dump = json.dumps(deploy_report)
f_json = open(cv_path / ("{}_report_dict.json".format(name)),"w")
f_json.write(json_dump)
f_json.close()
if plot:
plot_cm(true_labels, pred_labels,name=name_cm)
false_anom_rate_calc(true_labels,pred_labels,conf,cv_index,name,save)
class WindowShopper:
def __init__(self, data, labels, window_size = 64, trim=30, silent=False):
'''Init'''
self.data = data
self.labels = labels
if self.labels is not None:
self.label_count = len(labels['anom'].unique()) #Automatically assuming anomaly classification
self.trim = trim
self.silent = silent
#Windowed data and labels
self.windowed_data = []
self.windowed_label = []
#Output shape
self.window_size = window_size
self.metric_count = len(data.columns)
self.output_shape = (self.window_size, self.metric_count)
#Prepare windows
self._get_windowed_dataset()
#Not calling this but it is good to have
def _process_sample_count(self):
self.per_label_count = {x: 0 for x in self.labels[self.labels.columns[0]].unique()}
self.sample_count = 0
for node_id in self.data.index.get_level_values('node_id').unique():
counter = 0
cur_array = self.data.loc[node_id, :, :]
for i in range(self.trim, len(cur_array) - self.window_size - self.trim):
counter += 1
self.sample_count += counter
self.per_label_count[self.labels.loc[node_id, self.labels.columns[0]]] += counter
def _get_windowed_dataset(self):
if self.labels is not None:
#Iterate unique node_ids
for node_id in self.labels.index.unique():
# print(node_id)
cur_array = self.data.loc[node_id,:,:]
temp_data = []
temp_label = []
#Iterate over application runtime
for i in range(self.trim, len(cur_array) - self.window_size - self.trim):
self.windowed_data.append(cur_array.iloc[i:i+self.window_size].to_numpy(
dtype=np.float32).reshape(self.output_shape))
self.windowed_label.append(self.labels.loc[node_id])
self.windowed_data = np.dstack(self.windowed_data)
self.windowed_data = np.rollaxis(self.windowed_data,2)
if not self.silent:
logging.info("Windowed data shape: %s",self.windowed_data.shape)
            # FIXME: column names might be in reverse order for HPAS data; used 'app', 'anom' for Cori data but it was 'anom', 'app'
self.windowed_label = pd.DataFrame(np.asarray(self.windowed_label).reshape(len(self.windowed_label),2),columns=['app','anom'])
if not self.silent:
logging.info("Windowed label shape: %s",self.windowed_label.shape)
else:
logging.info("Deployment selection - no label provided")
cur_array = self.data
temp_data = []
temp_label = []
#Iterate over application runtime
for i in range(self.trim, len(cur_array) - self.window_size - self.trim):
self.windowed_data.append(cur_array.iloc[i:i+self.window_size].to_numpy(
dtype=np.float32).reshape(self.output_shape))
self.windowed_data = np.dstack(self.windowed_data)
self.windowed_data = np.rollaxis(self.windowed_data,2)
self.windowed_label = None
def return_windowed_dataset(self):
return self.windowed_data, self.windowed_label
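# --- Illustration (added for clarity; not part of the original class) ---
# A minimal, framework-free sketch of the windowing performed by
# WindowShopper: a (T, M) array of T timesteps and M metrics is cut into
# overlapping (window_size, M) slices, skipping `trim` steps at both ends,
# mirroring the loop in _get_windowed_dataset.
def _example_windowing(T=20, M=3, window_size=5, trim=2):
    series = np.arange(T * M, dtype=np.float32).reshape(T, M)
    windows = [series[i:i + window_size]
               for i in range(trim, T - window_size - trim)]
    return np.stack(windows)  # shape: (num_windows, window_size, M)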
def granularityAdjust(data,granularity=60):
result = | pd.DataFrame() | pandas.DataFrame |
from __future__ import print_function, division
from nilmtk.disaggregate import Disaggregator
from keras.layers import Conv1D, Dense, Dropout, Flatten
import pandas as pd
import numpy as np
from collections import OrderedDict
from keras.models import Sequential
from sklearn.model_selection import train_test_split
class ModelTestS2P(Disaggregator):
def __init__(self, params):
self.MODEL_NAME = "ModelTestS2P"
self.models = OrderedDict()
    def partial_fit(self, train_main, train_appliances):  # are train_main and train_appliances lists?
        train_main, train_appliances = self.call_preprocessing(train_main, train_appliances, 'train')  # call the preprocessing method
train_main = pd.concat(train_main, axis=0)
train_main = train_main.values.reshape((-1, 99, 1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = | pd.concat(app_df, axis=0) | pandas.concat |
# index page
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input,Output,State
import users_mgt as um
from server import app, server
from flask_login import logout_user, current_user
from views import success, login, login_fd, logout
import data_material
import admin
import pandas as pd
import sqlalchemy
from config import engine
import base64
import dash_table
import data_material
import datetime
#new_row = pd.Series(data={'Course_Name':'Python Data Analysis', 'Course_Rating':'', 'Course_Hours':'20','Students':'0'},
#name='{}'.format(len(df.index+1)))
#df = df.append(new_row, ignore_index=False)
#df=df.drop(columns="python_data_analysis")
#df=pd.read_sql_table('user',con='sqlite:///users.db')
#df=df.drop(3)
#df.insert(loc=4,column='Students',value='',allow_duplicates=False)
#df.to_sql("user", con='sqlite:///users.db', if_exists='replace', index=False)
#um.add_course('Python Data Analysis','',20,0)
#um.add_course('Machine Learning','',27,0)
#um.edit_sql_cell('python_data_analysis','student_id',1,1)
#df.insert(loc=0,column='student_id',value='1',allow_duplicates=False)
#df.set_index('student_id',inplace=True)
#df.to_sql("python_data_analysis", con='sqlite:///users.db', if_exists='replace', index=True,index_label='student_id')
#df.to_sql("courses", con='sqlite:///users.db', if_exists='replace', index=False)
#index=df.index[df['id'] == '2'].tolist()
#print( um.read_sql_cell('user','username', index[0] ))
#print(len(df.index))
#print(df)
#https://kanoki.org/2019/04/12/pandas-how-to-get-a-cell-value-and-update-it/
#https://www.youtube.com/watch?v=skGwKh1dAdk
encoded = base64.b64encode(open('logo.png', 'rb').read())
logo_img=dbc.Row([dbc.Col([
html.Img(src='data:image/png;base64,{}'.format(encoded.decode()), id='logo_img', height=80)
] ,
xs=dict(size=12,offset=0), sm=dict(size=12,offset=0),
md=dict(size=12,offset=0), lg=dict(size=12,offset=0), xl=dict(size=12,offset=0))
])
encoded2 = base64.b64encode(open('bg4.jpg', 'rb').read())
bg_img=html.Img(src='data:image/png;base64,{}'.format(encoded2.decode()), id='bg_img', height='800rem',width='100%')
header_text=html.Div('Learning Made Easy',style=dict(color='black',
fontWeight='bold',fontSize='1.4rem',marginTop='1rem',marginLeft='3rem'))
please_login_text=html.Div('Please login to continue..',style=dict(color='black',
fontWeight='bold',fontSize='1.4rem',marginTop='1rem',marginLeft='3rem'))
logout_msg=html.Div(id='logout')
search_input=dbc.Input(id="input", placeholder="Search here..", type="text",bs_size="lg",
style=dict(marginTop='1rem',fontSize='1.1rem'))
search_button= dbc.Button("Search", color="primary", size='lg', n_clicks=0,
style=dict(marginTop='1rem',fontSize='1.1rem'))
logout_button= dbc.Button("Logout", color="primary", size='md', n_clicks=0,id='logout_btn',
style=dict(marginTop='0.3rem',fontSize='1.1rem',marginLeft='2.5rem'))
db_logo_img=dbc.Col([ logo_img] ,
xs=dict(size=2,offset=0), sm=dict(size=2,offset=0),
md=dict(size=2,offset=0), lg=dict(size=2,offset=0), xl=dict(size=1,offset=0))
db_header_text= dbc.Col([ header_text] ,
xs=dict(size=8,offset=0), sm=dict(size=8,offset=0),
md=dict(size=2,offset=0), lg=dict(size=3,offset=0), xl=dict(size=3,offset=0))
db_search_input=dbc.Col([search_input],
xs=dict(size=5, offset=2), sm=dict(size=5, offset=2),
md=dict(size=2, offset=2), lg=dict(size=2, offset=2), xl=dict(size=2, offset=1))
db_search_button=dbc.Col([search_button],
xs=dict(size=2, offset=0), sm=dict(size=2, offset=0),
md=dict(size=2, offset=0), lg=dict(size=2, offset=0), xl=dict(size=2, offset=0))
db_please_login_text= dbc.Col([ please_login_text] ,
xs=dict(size=8,offset=0), sm=dict(size=8,offset=0),
md=dict(size=2,offset=0), lg=dict(size=3,offset=0), xl=dict(size=3,offset=0))
data_progress=dbc.Progress(children=[], max=100, striped=True, color="primary",id='progress',
style=dict(height='20px',backgroundColor='white',fontWeight='bold'),
bar_style=dict(color='black'))
data_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://res.cloudinary.com/dyd911kmh/image/upload/f_auto,q_auto:best/v1567221927/image_3_ayi4rs.png", top=True),
dbc.CardBody(
[
html.H5("Python Data Analysis", className="card-title",style=dict(color='black')),
html.P(
"using pandas python package to analyze data and make reports",
style=dict(color='black')
),
dbc.Nav([ dbc.NavItem(dbc.NavLink("Details", active=True, href="/data", id='data_details')) ],pills=True)
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
data_course_card_progress=dbc.Col([html.Br(),dbc.CardImg(src="https://res.cloudinary.com/dyd911kmh/image/upload/f_auto,q_auto:best/v1567221927/image_3_ayi4rs.png", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Python Data Analysis", className="card-title",style=dict(color='black')),
html.P(
"using pandas python package to analyze data and make reports",
style=dict(color='black')
),
dbc.Nav([ dbc.NavItem(dbc.NavLink("Details", active=True, href="/data", id='data_details')) ],pills=True),html.Br(),data_progress
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
ml_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://iraqcoders.com/wp-content/uploads/2019/02/emerging-tech_ai_machine-learning-100748222-large.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Machine Learning", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to implement basic machine learning ",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
sql_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://media.onlinecoursebay.com/2019/08/27030502/2488822_25d1-750x405.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("SQL basics", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to deal with different types of databases",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
image_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://images-na.ssl-images-amazon.com/images/I/61gBVmFtNpL.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Image Processing", className="card-title",style=dict(color='black')),
html.P(
"you will understand how to use opencv for image processing",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
iot_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://cdn.mindmajix.com/courses/iot-training.png", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Internet Of Things", className="card-title",style=dict(color='black')),
html.P(
"you will understand how IoT devices and systems works",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
embedded_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://prod-discovery.edx-cdn.org/media/course/image/785cf551-7f66-4350-b736-64a93427b4db-3dcdedbdf99d.small.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Embedded Systems", className="card-title",style=dict(color='black')),
html.P(
"you will learn embedded software techniques using tivac board",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
arch_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://moodle.aaup.edu/pluginfile.php/288902/course/overviewfiles/Computer-Architecture.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Computer Architecture", className="card-title",style=dict(color='black')),
html.P(
"you will learn how memory and cpu works in details",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
web_course_card=dbc.Col([html.Br(),dbc.CardImg(src="https://www.onlinecoursereport.com/wp-content/uploads/2020/07/shutterstock_394793860-1024x784.jpg", top=True,
style=dict(height='20vh')),
dbc.CardBody(
[
html.H5("Web development", className="card-title",style=dict(color='black')),
html.P(
"you will learn to develop website using html,css and javascript",
style=dict(color='black')
),
dbc.Button("Details", color="primary"),
] ,style=dict(backgroundColor='#f0ad4e')
)
] ,xl=dict(size=2,offset=1),lg=dict(size=2,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
courses_layout=dbc.Row([ data_course_card, ml_course_card,sql_course_card,image_course_card,
iot_course_card,embedded_course_card ,arch_course_card,web_course_card
] , no_gutters=False)
rate_button= dbc.Button("Rate Course", color="primary", size='lg', n_clicks=0,id='rate_button',
style=dict(marginTop='1rem',fontSize='1.1rem'))
rate_input= html.Div([dbc.Input(id="rate_input", placeholder="0-5 Stars", type="number",bs_size="lg",
min=1, max=5,
style=dict(fontSize='1.1rem')) ] )
submit_rating_button= dbc.Button("Submit", color="primary", size='lg', n_clicks=0, id='submit_rating_button',
style=dict(marginTop='1rem',fontSize='1.1rem'))
rating_input=dbc.Collapse([ rate_input, submit_rating_button
],id="collapse",is_open=False,
style=dict(border='0.5vh solid black')
)
rate_div=html.Div([rate_button,html.Br(),html.Br(),rating_input
] )
sidebar = html.Div(
[
html.H2("Course Content", className="display-4", style=dict(color='black',fontWeight='bold')),
html.Hr(style=dict(color='black')),
html.P(
"Welcome to the course , you can start your lessons bellow ..",className="lead", style=dict(color='black')
),
dbc.Nav(
[
dbc.NavLink("Session1", href="/data/video1", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session2", href="/data/video2", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session3", href="/data/video3", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session4", href="/data/video4", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session5", href="/data/video5", active="exact",style=dict(fontWeight='bold')),
dbc.NavLink("Session6", href="/data/video6", active="exact",style=dict(fontWeight='bold'))
],
vertical=True,
pills=True,
),rate_div
],
style=dict(backgroundColor='#f0ad4e',height='100%')
)
star_img = 'star.jpg'
encoded = base64.b64encode(open(star_img, 'rb').read())
star_image = html.Img(src='data:image/png;base64,{}'.format(encoded.decode()), id='img1', height=40, width=40)
star_image_div=html.Div(star_image, style=dict(display='inline-block'))
data_course_header=html.Div([html.Br(),html.H1('Python Data analysis and visualization course',style=dict(fontSize=36)),
html.Div(' Rating : 4.5/5',style=dict(fontSize=22,display='inline-block'),id='stars'),
star_image_div,html.Div ('Students : 23',style=dict(fontSize=22),id='students'),
html.Div('Total Hours : 20 hour',style=dict(fontSize=22)),html.Br(),
dbc.Nav([dbc.NavItem(dbc.NavLink("Enroll Course", active=True, href="/data/video1", id='enroll_data'))],
pills=True),html.Br()
] , style=dict(color='white',border='4px #f0ad4e solid'))
data_course_Req= html.Div([html.H1('Requirements',style=dict(fontSize=32)),
html.Div(style=dict(border='3px #f0ad4e solid',width='100%',height='5px')),html.Br(),
html.Div(' 1-Basic math skills',style=dict(fontSize=22)),
html.Div ('2-Basic to Intermediate Python Skills.',style=dict(fontSize=22)),
html.Div ('3-Have a computer (either Mac, Windows, or Linux.',style=dict(fontSize=22))
] , style=dict(color='white'))
data_course_desc=html.Div([html.H1('Description',style=dict(fontSize=32)),
html.Div(style=dict(border='3px #f0ad4e solid',width='100%',height='5px')),
html.Div('Our goal is to provide you with complete preparation. And this course will turn you into a job-ready data analyst.'
' To take you there, we will cover the following fundamental topics extensively.',
style=dict(fontSize=22,color='white')),
html.Div('1- Theory about the field of data analytics',style=dict(fontSize=22,color='white')),
html.Div('2- Basic and Advanced python',style=dict(fontSize=22,color='white')),
html.Div('3- Pandas and Numpy libraries'),
html.Div('4- Data collection ,Cleaning and Visualization',style=dict(fontSize=22,color='white'))
] )
data_video1_youtube=html.Div(children=[
html.Iframe(width="100%%", height="474", src="https://www.youtube.com/embed/nLw1RNvfElg"
, title="YouTube video player"
),
])
data_quiz1_header=html.Div('A graph used in statistics to demonstrate how many of a certain type of variable occurs within a specific range',
style=dict(fontSize=22,color='black',fontWeight='black') )
data_quiz1_choices=dcc.RadioItems(
options=[
{'label': 'Bar plot', 'value': 'bar'},
{'label': 'Histogram ', 'value': 'hist'},
{'label': 'Scatter plot', 'value': 'scat'},
{'label': 'Box plot', 'value': 'box'}
],
value='',labelStyle=dict(display='block',color='black',marginLeft='1rem',fontSize=22),
inputStyle=dict(width='1.2rem',height='1.2rem',marginRight='0.5rem') ,id='data_quiz1_choices',
style=dict(marginLeft='4rem') , persistence=True
)
data_quiz1_submit=dbc.Button("Submit", color="primary", size='lg', n_clicks=0,id='data_quiz1_submit',
style=dict(marginTop='0.3rem',fontSize='1.1rem'))
data_quiz1_answer=html.Div('',style=dict(fontSize=22,color='white',fontWeight='bold'),id='data_quiz1_answer')
data_quiz1=html.Div([ html.H1('Quiz1',style=dict(fontSize=32,color='black')),data_quiz1_header,html.Br(),
html.Hr(style=dict(color='black')) ,data_quiz1_choices
] ,style=dict(backgroundColor='#f0ad4e') )
data_video1_layout=dbc.Row([dbc.Col([html.Br(),sidebar
] ,xl=dict(size=2,offset=0),lg=dict(size=2,offset=0),
md=dict(size=5,offset=0),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1)
) ,
dbc.Col([ html.Br(),data_video1_youtube,html.Br(),html.Br(),
data_quiz1,data_quiz1_submit,html.Br(),data_quiz1_answer
] ,xl=dict(size=5,offset=2),lg=dict(size=5,offset=2),
md=dict(size=3,offset=1),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1) )
] , no_gutters=False )
data_details_layout=dbc.Row([dbc.Col([html.Br(),data_course_header
] ,xl=dict(size=6,offset=1),lg=dict(size=6,offset=1),
md=dict(size=8,offset=1),sm=dict(size=10,offset=1),xs=dict(size=10,offset=1)
) ,
dbc.Col([ html.Br(),data_course_Req,html.Br(),data_course_desc
] ,xl=dict(size=5,offset=1),lg=dict(size=5,offset=1),
md=dict(size=3,offset=1),sm=dict(size=8,offset=1),xs=dict(size=8,offset=1) )
] , no_gutters=False )
app.layout = html.Div(
[
dbc.Row([ db_logo_img ,db_header_text
] ,no_gutters=False,style=dict(backgroundColor='#f0ad4e'),id='header' )
,
html.Div(id='page-content')
, html.Div( [] , id='page-content2'),
dcc.Location(id='url', refresh=True) , html.Div([''],id='hidden_div1',style=dict(display='none')),
html.Div([''],id='hidden_div2',style=dict(display='none')) ,
dcc.Interval(id='my_interval', interval=1500)
]
)
# <iframe width="843" height="474" src="https://www.youtube.com/embed/nLw1RNvfElg"
# title="YouTube video player" frameborder="0"
# allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
print( df['Course_Rating'][2] )
a=3.5666667
print("%.2f" % round(a,2))
@app.callback([Output('stars','children'),Output('students','children')],
Input('url', 'pathname'))
def update_data_details(path):
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
rating_sum=0
rating_students=0
for rating in range(0,len(df.index) ):
if um.read_sql_cell('python_data_analysis','Course_Rating',rating) != '':
rating_students+=1
rating_sum= int(rating_sum) + int(df['Course_Rating'][rating])
stars_avg=int(rating_sum)/rating_students
students_num=len(df.index)
return (' Rating : {}/5'.format("%.2f" % round(stars_avg,1)),'Students : {}'.format(students_num))
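# --- Illustration (added for clarity; not part of the original callback) ---
# The rating loop above can also be expressed directly with pandas: treat
# empty ratings as missing, convert the rest to numbers and take the mean.
# This is only a sketch of the equivalent computation, not a drop-in
# replacement for the callback.
def _example_average_rating(df):
    ratings = pd.to_numeric(df['Course_Rating'], errors='coerce')
    return ratings.mean(), len(df.index)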
@app.callback([Output('page-content', 'children'),Output('header','children'),Output('page-content2','children')],
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/':
return (login.layout, [db_logo_img , db_header_text,db_please_login_text],[])
elif pathname == '/login':
return (login.layout,[ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/success':
if current_user.is_authenticated:
if current_user.username=='admin':
return ([admin.layout,html.Br(),logout_button,dcc.Location(id='url_login_success', refresh=True)],[],[])
else:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text,logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text] ,[bg_img])
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data_course_table':
if current_user.is_authenticated:
return (admin.layout2, [], [])
else:
return (login_fd.layout, [db_logo_img, db_header_text, db_please_login_text], [])
elif pathname == '/Add_Course':
if current_user.is_authenticated:
return (admin.layout3, [], [])
else:
return (login_fd.layout, [db_logo_img, db_header_text, db_please_login_text], [])
elif pathname == '/logout':
if current_user.is_authenticated:
logout_user()
return (logout.layout, [ db_logo_img , db_header_text ,db_please_login_text],[])
else:
            return (logout.layout, [db_logo_img, db_header_text, db_please_login_text], [])
#"https://www.youtube.com/embed/ln8dyS2y4Nc"
elif pathname == "/Courses":
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
, [courses_layout] )
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_details_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video1':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_video1_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video2':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video2_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video3':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video3_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video4':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video4_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video5':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video5_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == '/data/video6':
if current_user.is_authenticated:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [db_logo_img, db_header_text,
db_search_input, db_search_button,db_username_text]
,data_material.data_video6_layout)
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
elif pathname == "/My-Courses":
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db')
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist()
try:
um.read_sql_cell('python_data_analysis', 'Enrolled', index[0])
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
,[data_course_card_progress] )
except:
username_text = html.Div(['Current user: ' + current_user.username], id='user-name',
style=dict(color='black',
fontWeight='bold', fontSize='1.1rem', marginTop='1rem',
marginLeft='1rem'))
db_username_text = dbc.Col([username_text, logout_button],
xs=dict(size=8, offset=0), sm=dict(size=8, offset=0),
md=dict(size=2, offset=0), lg=dict(size=3, offset=0), xl=dict(size=3, offset=0))
return (success.layout, [ db_logo_img , db_header_text ,
db_search_input,db_search_button,db_username_text]
,[html.H1('you dont have courses yet',
style={'textAlign':'center'})
] )
else:
return (login_fd.layout, [ db_logo_img , db_header_text,db_please_login_text ],[] )
# If the user tries to reach a different page, return a 404 message
return ( html.H1("404: Not found", className="text-danger"), [],[] )
@app.callback(
Output('user-name', 'children'),
[Input('page-content', 'children')])
def cur_user(input1):
if current_user.is_authenticated:
return html.Div('Current user: ' + current_user.username)
# 'User authenticated' return username in get_id()
else:
return ''
@app.callback(
Output('logout', 'children'),
[Input('page-content', 'children')])
def user_logout(input1):
if current_user.is_authenticated:
return html.A('Logout', href='/logout')
else:
return ''
# first input is the button clicks, second input is the quiz answer picked by the student
# first output is the msg that appears after the user enters an answer, second output is the style of this msg (color)
@app.callback([Output('data_quiz1_answer', 'children') , Output('data_quiz1_answer', 'style') ],
Input('data_quiz1_submit', 'n_clicks'),State('data_quiz1_choices', 'value') )
def data_quiz1_answer(clicks,answer):
if answer=='hist': # check if answer is the correct answer
if current_user.is_authenticated:
df = pd.read_sql_table('python_data_analysis', con='sqlite:///users.db') #reading course table in database
index = df.index[df['student_id'] == '{}'.format(current_user.id)].tolist() # reading the id of the current user
ans=um.read_sql_cell('python_data_analysis','quiz1_state',index[0]) # reading the quiz1 answer that is recorded in database
progress=um.read_sql_cell('python_data_analysis','Course_progress',index[0]) # reading the course progress for the current user
new_progress = '{}{}'.format(int(progress.split('%')[0]) + 10, '%') # increase the course progress
            if ans=='': # check if the user already answered the quiz or it's the first time
um.edit_sql_cell('python_data_analysis','quiz1_state',index[0],'passed') # update the quiz1 state to passed
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress) # update the course progress in database
return ('Correct Answer , Nice work..',dict(fontSize=22,color='green',fontWeight='bold')) # change the output string
elif ans=='passed':
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold')) # user already answered so no update in database only return string
elif ans == 'failed':
um.edit_sql_cell('python_data_analysis', 'Course_progress', index[0], new_progress)
um.edit_sql_cell('python_data_analysis', 'quiz1_state', index[0], 'passed')
return ('Correct Answer , Nice work..', dict(fontSize=22, color='green', fontWeight='bold'))
elif answer=='':
return ('',dict(fontSize=22,color='green',fontWeight='bold'))
else:
if current_user.is_authenticated:
df = | pd.read_sql_table('python_data_analysis', con='sqlite:///users.db') | pandas.read_sql_table |
"""
Tools to clean Balancing area data.
A data cleaning step is performed by an object that subclasses
the `BaDataCleaner` class.
"""
import os
import logging
import time
import re
from gridemissions.load import BaData
from gridemissions.eia_api import SRC, KEYS
import pandas as pd
import numpy as np
from collections import defaultdict
import cvxpy as cp
import dask
A = 1e4 # MWh
GAMMA = 10 # MWh
EPSILON = 1 # MWh
def na_stats(data, title, cols):
"""
Print NA statistics for a subset of a dataframe.
"""
print(
"%s:\t%.2f%%"
% (
title,
(
data.df.loc[:, cols].isna().sum().sum()
/ len(data.df)
/ len(data.df.loc[:, cols].columns)
* 100
),
)
)
class BaDataCleaner(object):
"""
Template class for data cleaning.
This is mostly just a shell to show how cleaning classes should operate.
"""
def __init__(self, ba_data):
"""
Parameters
----------
ba_data : BaData object
"""
self.d = ba_data
self.logger = logging.getLogger("clean")
def process(self):
pass
class BaDataBasicCleaner(BaDataCleaner):
"""
Basic data cleaning class.
We run this as the first step of the cleaning process.
"""
def process(self):
self.logger.info("Running BaDataBasicCleaner")
start = time.time()
data = self.d
missing_D_cols = [col for col in data.NG_cols if col not in data.D_cols]
self.logger.info("Adding demand columns for %d bas" % len(missing_D_cols))
for ba in missing_D_cols:
data.df.loc[:, data.KEY["D"] % ba] = 1.0
data.df.loc[:, data.KEY["NG"] % ba] -= 1.0
data.df.loc[:, data.KEY["TI"] % ba] -= 1.0
# AVRN only exports to BPAT - this is missing for now
if "AVRN" not in data.ID_cols:
self.logger.info("Adding trade columns for AVRN")
ba = "AVRN"
ba2 = "BPAT"
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = (
data.df.loc[:, data.KEY["NG"] % ba] - 1.0
)
data.df.loc[:, data.KEY["ID"] % (ba2, ba)] = (
-data.df.loc[:, data.KEY["NG"] % ba] + 1.0
)
# Add columns for biomass and geothermal for CISO
# We are assuming constant generation for each of these sources
# based on historical data. Before updating this, need to
# contact the EIA API maintainers to understand why this isn't
# reported and where to find it
self.logger.info("Adding GEO and BIO columns for CISO")
data.df.loc[:, "EBA.CISO-ALL.NG.GEO.H"] = 900.0
data.df.loc[:, "EBA.CISO-ALL.NG.BIO.H"] = 600.0
# data.df.loc[:, "EBA.CISO-ALL.NG.H"] += 600.0 + 900.0
# Add columns for the BAs that are outside of the US
foreign_bas = list(
set([col for col in data.ID_cols2 if col not in data.NG_cols])
)
self.logger.info(
"Adding demand, generation and TI columns for %d foreign bas"
% len(foreign_bas)
)
for ba in foreign_bas:
trade_cols = [col for col in data.df.columns if "%s.ID.H" % ba in col]
TI = -data.df.loc[:, trade_cols].sum(axis=1)
data.df.loc[:, data.KEY["TI"] % ba] = TI
exports = TI.apply(lambda x: max(x, 0))
imports = TI.apply(lambda x: min(x, 0))
data.df.loc[:, data.KEY["D"] % ba] = -imports
data.df.loc[:, data.KEY["NG"] % ba] = exports
if ba in ["BCHA", "HQT", "MHEB"]:
# Assume for these Canadian BAs generation is hydro
data.df.loc[:, data.KEY["SRC_WAT"] % ba] = exports
else:
# And all others are OTH (other)
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = exports
for col in trade_cols:
ba2 = re.split(r"\.|-|_", col)[1]
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = -data.df.loc[:, col]
# Make sure that trade columns exist both ways
for col in data.get_cols(field="ID"):
ba = re.split(r"\.|-|_", col)[1]
ba2 = re.split(r"\.|-|_", col)[2]
othercol = data.KEY["ID"] % (ba2, ba)
if othercol not in data.df.columns:
self.logger.info("Adding %s" % othercol)
data.df.loc[:, othercol] = -data.df.loc[:, col]
# Filter unrealistic values using self.reject_dict
self._create_reject_dict()
cols = (
data.get_cols(field="D")
+ data.get_cols(field="NG")
+ data.get_cols(field="TI")
+ data.get_cols(field="ID")
)
for col in cols:
s = data.df.loc[:, col]
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0]) & (s <= self.reject_dict[col][1])
)
# Do the same for the generation by source columns
# If there is no generation by source, add one that is OTH
# Edge case for solar:
# There are a lot of values at -50 MWh or so during the night. We want
# to set those to 0, but consider that very negative values (below
# -1GW) are rejected
for ba in data.regions:
missing = True
for src in SRC:
col = data.KEY["SRC_%s" % src] % ba
if col in data.df.columns:
missing = False
s = data.df.loc[:, col]
if src == "SUN":
self.reject_dict[col] = (-1e3, 200e3)
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0])
& (s <= self.reject_dict[col][1])
)
if src == "SUN":
data.df.loc[:, col] = data.df.loc[:, col].apply(
lambda x: max(x, 0)
)
if missing:
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = data.df.loc[
:, data.KEY["NG"] % ba
]
# Reinitialize fields
self.logger.info("Reinitializing fields")
data = BaData(df=data.df)
self.r = data
self.logger.info("Basic cleaning took %.2f seconds" % (time.time() - start))
def _create_reject_dict(self):
"""
Create a defaultdict to store ranges outside of which values are
considered unrealistic.
The default range is (-1., 200e3) MW. Manual ranges can be set for
specific columns here if that range is not strict enough.
"""
reject_dict = defaultdict(lambda: (-1.0, 200e3))
for col in self.d.get_cols(field="TI"):
reject_dict[col] = (-100e3, 100e3)
for col in self.d.get_cols(field="ID"):
reject_dict[col] = (-100e3, 100e3)
reject_dict["EBA.AZPS-ALL.D.H"] = (1.0, 30e3)
reject_dict["EBA.BANC-ALL.D.H"] = (1.0, 6.5e3)
reject_dict["EBA.BANC-ALL.TI.H"] = (-5e3, 5e3)
reject_dict["EBA.CISO-ALL.NG.H"] = (5e3, 60e3)
self.reject_dict = reject_dict
def rolling_window_filter(
df,
offset=10 * 24,
min_periods=100,
center=True,
replace_nan_with_mean=True,
return_mean=False,
):
"""
Apply a rolling window filter to a dataframe.
Filter using dynamic bounds: reject points that are farther than 4 standard
deviations from the mean, using a rolling window to compute the mean and
standard deviation.
Parameters
----------
df : pd.DataFrame
Dataframe to filter
offset : int
Passed on to pandas' rolling function
min_periods : int
Passed on to pandas' rolling function
center : bool
Passed on to pandas' rolling function
replace_nan_with_mean : bool
Whether to replace NaNs with the mean of the timeseries at the end of
the procedure
Notes
-----
Keeps at least 200 MWh around the mean as an acceptance range.
"""
for col in df.columns:
rolling_ = df[col].rolling(offset, min_periods=min_periods, center=center)
mean_ = rolling_.mean()
std_ = rolling_.std().apply(lambda x: max(100, x))
ub = mean_ + 4 * std_
lb = mean_ - 4 * std_
idx_reject = (df[col] >= ub) | (df[col] <= lb)
df.loc[idx_reject, col] = np.nan
if replace_nan_with_mean:
# First try interpolating linearly, but only for up to 3 hours
df.loc[:, col] = df.loc[:, col].interpolate(limit=3)
# If there is more than 3 hours of missing data, use rolling mean
df.loc[df[col].isnull(), col] = mean_.loc[df[col].isnull()]
if return_mean:
mean_ = df.rolling(offset, min_periods=min_periods, center=center).mean()
return (df, mean_)
return df
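# --- Illustration (added for clarity; not part of the original module) ---
# A small synthetic check of rolling_window_filter: a flat ~500 MWh signal
# with one 50 GWh spike. With the 4-sigma band (and the 100 MWh floor on the
# rolling standard deviation) the spike is rejected and, because
# replace_nan_with_mean defaults to True, refilled from neighboring values.
def _example_rolling_filter():
    idx = pd.date_range("2020-01-01", periods=500, freq="H")
    df = pd.DataFrame({"demand": 500.0 + np.random.randn(500)}, index=idx)
    df.iloc[250, 0] = 50000.0  # inject an outlier
    return rolling_window_filter(df.copy(), offset=48, min_periods=24)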
class BaDataRollingCleaner(BaDataCleaner):
"""
Rolling window cleaning.
This applies the `rolling_window_filter` function to the dataset. In order
to apply this properly to the beginning of the dataset, we load past data
that will be used for the cleaning - that is then dropped.
"""
def process(self, file_name="", folder_hist="", nruns=2):
"""
Processor function for the cleaner object.
Parameters
----------
file_name : str
Base name of the file from which to read historical data.
Data is read from "%s_basic.csv" % file_name
folder_hist : str
Folder from which to read historical data
nruns : int
Number of times to apply the rolling window procedure
Notes
-----
If we are not processing a large amount of data at a time, we may not
have enough data to appropriately estimate the rolling mean and
standard deviation for the rolling window procedure. If values are
given for `file_name` and `folder_hist`, data will be read from a
historical dataset to estimate the rolling mean and standard deviation.
If there are very large outliers, they can 'mask' smaller outliers.
Running the rolling window procedure a couple of times helps with this
issue.
"""
self.logger.info("Running BaDataRollingCleaner (%d runs)" % nruns)
start = time.time()
data = self.d
# Remember what part we are cleaning
idx_cleaning = data.df.index
try:
# Load the data we already have in memory
df_hist = pd.read_csv(
os.path.join(folder_hist, "%s_basic.csv" % file_name),
index_col=0,
parse_dates=True,
)
# Only take the last 1,000 rows
# Note that if df_hist has less than 1,000 rows,
# pandas knows to select df_hist without raising an error.
df_hist = df_hist.iloc[-1000:]
# Overwrite with the new data
old_rows = df_hist.index.difference(data.df.index)
df_hist = data.df.append(df_hist.loc[old_rows, :], sort=True)
df_hist.sort_index(inplace=True)
except FileNotFoundError:
self.logger.info("No history file")
df_hist = data.df
# Apply rolling horizon threshold procedure
# 20200206 update: don't try replacing NaNs anymore, leave that to the
# next step
for _ in range(nruns):
df_hist = rolling_window_filter(df_hist, replace_nan_with_mean=False)
# Deal with NaNs
# First deal with NaNs by taking the average of the previous day and
# next day. In general we observe strong daily patterns so this seems
# to work well. Limit the filling procedure to one day at a time. If
# there are multiple missing days, this makes for a smoother transition
# between the two valid days. If we had to do this more than 4 times,
# give up and use forward and backward fills without limits
for col in df_hist.columns:
npasses = 0
while (df_hist.loc[:, col].isna().sum() > 0) and (npasses < 4):
npasses += 1
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(limit=1),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(limit=1),
],
axis=1,
).mean(axis=1)
if npasses == 4:
self.logger.debug("A lot of bad data for %s" % col)
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(),
],
axis=1,
).mean(axis=1)
# All bad data columns
if df_hist.loc[:, col].isna().sum() == len(df_hist):
df_hist.loc[:, col] = 0.0
# Some NaNs will still remain - try using the rolling mean average
df_hist, mean_ = rolling_window_filter(
df_hist, replace_nan_with_mean=True, return_mean=True
)
if df_hist.isna().sum().sum() > 0:
self.logger.warning("There are still some NaNs. Unexpected")
# Just keep the indices we are working on currently
data = BaData(df=df_hist.loc[idx_cleaning, :])
self.r = data
self.weights = mean_.loc[idx_cleaning, :].applymap(
lambda x: A / max(GAMMA, abs(x))
)
self.logger.info(
"Rolling window cleaning took %.2f seconds" % (time.time() - start)
)
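# --- Illustration (added for clarity; not part of the original module) ---
# A compact sketch of the gap-filling idiom used in process() above: a
# missing hour is replaced by the average of the same hour on the previous
# and next day, by grouping on hour of day before forward/backward filling.
def _example_hourly_gap_fill(s):
    """s: hourly pd.Series with a DatetimeIndex, possibly containing NaNs."""
    return pd.concat(
        [
            s.groupby(s.index.hour).ffill(limit=1),
            s.groupby(s.index.hour).bfill(limit=1),
        ],
        axis=1,
    ).mean(axis=1)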
class BaDataPyoCleaningModel(object):
"""
Create an AbstractModel() for the cleaning problem.
No data is passed into this model at this point, it is
simply written in algebraic form.
"""
    def __init__(self):
        import pyomo.environ as pyo  # imported lazily so pyomo is only needed when this cleaner is used
        m = pyo.AbstractModel()
# Sets
m.regions = pyo.Set()
m.srcs = pyo.Set()
m.regions2 = pyo.Set(within=m.regions * m.regions)
m.regions_srcs = pyo.Set(within=m.regions * m.srcs)
# Parameters
m.D = pyo.Param(m.regions, within=pyo.Reals)
m.NG = pyo.Param(m.regions, within=pyo.Reals)
m.TI = pyo.Param(m.regions, within=pyo.Reals)
m.ID = pyo.Param(m.regions2, within=pyo.Reals)
m.NG_SRC = pyo.Param(m.regions_srcs, within=pyo.Reals)
m.D_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.NG_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.TI_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.ID_W = pyo.Param(m.regions2, default=1.0, within=pyo.Reals)
m.NG_SRC_W = pyo.Param(m.regions_srcs, default=1.0, within=pyo.Reals)
# Variables
        # delta_NG_aux is an auxiliary variable for the case where there
        # are no SRC data. In that case, the NG_sum constraint would
        # only have: m.NG + m.delta_NG = 0.
m.delta_D = pyo.Var(m.regions, within=pyo.Reals)
m.delta_NG = pyo.Var(m.regions, within=pyo.Reals)
m.delta_TI = pyo.Var(m.regions, within=pyo.Reals)
m.delta_ID = pyo.Var(m.regions2, within=pyo.Reals)
m.delta_NG_SRC = pyo.Var(m.regions_srcs, within=pyo.Reals)
# m.delta_NG_aux = pyo.Var(m.regions, within=pyo.Reals)
# Constraints
m.D_positive = pyo.Constraint(m.regions, rule=self.D_positive)
m.NG_positive = pyo.Constraint(m.regions, rule=self.NG_positive)
m.NG_SRC_positive = pyo.Constraint(m.regions_srcs, rule=self.NG_SRC_positive)
m.energy_balance = pyo.Constraint(m.regions, rule=self.energy_balance)
m.antisymmetry = pyo.Constraint(m.regions2, rule=self.antisymmetry)
m.trade_sum = pyo.Constraint(m.regions, rule=self.trade_sum)
m.NG_sum = pyo.Constraint(m.regions, rule=self.NG_sum)
# Objective
m.total_penalty = pyo.Objective(rule=self.total_penalty, sense=pyo.minimize)
self.m = m
def D_positive(self, model, i):
return (model.D[i] + model.delta_D[i]) >= EPSILON
def NG_positive(self, model, i):
return (model.NG[i] + model.delta_NG[i]) >= EPSILON
def NG_SRC_positive(self, model, k, s):
return model.NG_SRC[k, s] + model.delta_NG_SRC[k, s] >= EPSILON
def energy_balance(self, model, i):
return (
model.D[i]
+ model.delta_D[i]
+ model.TI[i]
+ model.delta_TI[i]
- model.NG[i]
- model.delta_NG[i]
) == 0.0
def antisymmetry(self, model, i, j):
return (
model.ID[i, j]
+ model.delta_ID[i, j]
+ model.ID[j, i]
+ model.delta_ID[j, i]
== 0.0
)
def trade_sum(self, model, i):
return (
model.TI[i]
+ model.delta_TI[i]
- sum(
model.ID[k, l] + model.delta_ID[k, l]
for (k, l) in model.regions2
if k == i
)
) == 0.0
def NG_sum(self, model, i):
return (
model.NG[i]
+ model.delta_NG[i] # + model.delta_NG_aux[i]
- sum(
model.NG_SRC[k, s] + model.delta_NG_SRC[k, s]
for (k, s) in model.regions_srcs
if k == i
)
) == 0.0
def total_penalty(self, model):
return (
sum(
(
model.D_W[i] * model.delta_D[i] ** 2
+ model.NG_W[i] * model.delta_NG[i] ** 2
# + model.delta_NG_aux[i]**2
+ model.TI_W[i] * model.delta_TI[i] ** 2
)
for i in model.regions
)
+ sum(
model.ID_W[i, j] * model.delta_ID[i, j] ** 2
for (i, j) in model.regions2
)
+ sum(
model.NG_SRC_W[i, s] * model.delta_NG_SRC[i, s] ** 2
for (i, s) in model.regions_srcs
)
)
class BaDataPyoCleaner(BaDataCleaner):
"""
Optimization-based cleaning class.
Uses pyomo to build the model and Gurobi as the default solver.
"""
def __init__(self, ba_data, weights=None, solver="gurobi"):
super().__init__(ba_data)
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
self.m = BaDataPyoCleaningModel().m
self.opt = SolverFactory(solver)
self.weights = weights
if weights is not None:
self.d.df = pd.concat(
[self.d.df, weights.rename(lambda x: x + "_W", axis=1)], axis=1
)
def process(self, debug=False):
start = time.time()
self.logger.info("Running BaDataPyoCleaner for %d rows" % len(self.d.df))
self.d.df = self.d.df.fillna(0)
if not debug:
self.r = self.d.df.apply(self._process, axis=1)
else:
r_list = []
delta_list = []
for idx, row in self.d.df.iterrows():
_, r, deltas = self._process(row, debug=True)
r_list.append(r)
delta_list.append(deltas)
self.r = | pd.concat(r_list, axis=1) | pandas.concat |
import os
import sys
import json
import yaml
import pandas as pd
from ananke.graphs import ADMG
from networkx import DiGraph
from optparse import OptionParser
sys.path.append(os.getcwd())
sys.path.append('/root')
from src.causal_model import CausalModel
from src.generate_params import GenerateParams
def config_option_parser():
"""This function is used to configure option parser
@returns:
options: option parser handle"""
usage = """USAGE: %python3 run_unicorn_debug.py -o [objectives] -d [init_data] -s [software] -k [hardware] -m [mode] -i [bug_index]
"""
parser = OptionParser(usage=usage)
parser.add_option('-o', '--objective', dest='obj',
default=[], nargs=1, type='choice',
choices=('inference_time', 'total_energy_consumption', 'total_temp'), action='append', help="objective type")
parser.add_option('-s', "--software", action="store",
type="string", dest="software", help="software")
parser.add_option('-k', "--hardware", action="store",
type="string", dest="hardware", help="hardware")
parser.add_option('-m', "--mode", action="store",
type="string", dest="mode", help="mode")
parser.add_option('-i', "--bug_index", action="store",
type="string", dest="bug_index", help="bug_index")
(options, args) = parser.parse_args()
return options
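

# Minimal sketch of a typical invocation; the software/hardware values here are
# examples only -- the valid choices come from etc/config.yml.
def _example_options():
    sys.argv = ["run_unicorn_debug.py", "-o", "inference_time",
                "-s", "deepstream", "-k", "TX2", "-m", "offline"]
    return config_option_parser()  # options.obj == ["inference_time"], etc.
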
def run_unicorn_loop(CM, df,
tabu_edges, columns, options,
NUM_PATHS):
"""This function is used to run unicorn in a loop"""
    # NOTEARS causal model hyperparams
#_, notears_edges = CM.learn_entropy(df, tabu_edges, 0.75)
# get bayesian network from DAG obtained
# bn = BayesianNetwork(sm)
fci_edges = CM.learn_fci(df, tabu_edges)
edges = []
# resolve notears_edges and fci_edges and update
di_edges, bi_edges = CM.resolve_edges(edges, fci_edges, columns,
tabu_edges, NUM_PATHS, options.obj)
# construct mixed graph ADMG
G = ADMG(columns, di_edges=di_edges, bi_edges=bi_edges)
return G, di_edges, bi_edges
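

# Toy illustration of the ADMG construction used above; the variable names are
# made up, the real vertex set comes from the config columns assembled in main.
def _toy_admg():
    vertices = ["cache_size", "cpu_freq", "inference_time"]
    di_edges = [("cache_size", "inference_time"), ("cpu_freq", "inference_time")]
    bi_edges = [("cache_size", "cpu_freq")]
    return ADMG(vertices, di_edges=di_edges, bi_edges=bi_edges)
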
if __name__ == "__main__":
query = 0.8
NUM_PATHS = 25
options = config_option_parser()
# Initialization
with open(os.path.join(os.getcwd(), "etc/config.yml")) as file:
cfg = yaml.load(file, Loader=yaml.FullLoader)
# nodes for causal graph
soft_columns = cfg["software_columns"][options.software]
hw_columns = cfg["hardware_columns"][options.hardware]
kernel_columns = cfg["kernel_columns"]
perf_columns = cfg["perf_columns"]
obj_columns = options.obj
columns = soft_columns + hw_columns + kernel_columns + perf_columns + obj_columns
conf_opt = soft_columns + hw_columns + kernel_columns
if len(options.obj) > 1:
init_dir = os.path.join(os.getcwd(), cfg["init_dir"], "multi",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + "initial.csv")
bug_dir = os.path.join(os.getcwd(), cfg["bug_dir"], "multi", options.hardware,
options.software, options.hardware + "_" + options.software + "_" + "multi.csv")
with open(os.path.join(os.getcwd(), cfg["debug_dir"], "multi", options.hardware,
options.software, "measurement.json")) as mfl:
m = json.load(mfl)
else:
init_dir = os.path.join(os.getcwd(), cfg["init_dir"], "single",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + "initial.csv")
bug_dir = os.path.join(os.getcwd(), cfg["bug_dir"], "single",
options.hardware, options.software, options.hardware + "_" + options.software + "_" + options.obj[0] + ".csv")
with open(os.path.join(os.getcwd(), cfg["debug_dir"], "single", options.hardware,
options.software, "measurement.json")) as mfl:
m = json.load(mfl)
# get init data
df = pd.read_csv(init_dir)
df = df[columns]
# get bug data
bug_df = pd.read_csv(bug_dir)
# initialize causal model object
CM = CausalModel(columns)
g = DiGraph()
g.add_nodes_from(columns)
# edge constraints
tabu_edges = CM.get_tabu_edges(columns, conf_opt, options.obj)
G, di_edges, bi_edges = run_unicorn_loop(CM, df,
tabu_edges, columns, options,
NUM_PATHS)
g.add_edges_from(di_edges + bi_edges)
var_types = {}
for col in columns:
var_types[col] = "c"
# Get Bug and update df
bug_exists = True
if options.bug_index:
bug_df = bug_df.iloc[int(options.bug_index):int(options.bug_index) + 1]
result_columns = conf_opt + obj_columns
measurement_dir = os.path.join(os.getcwd(),"data","measurement","output","debug_exp.csv")
for bug_id in range(len(bug_df)):
result_df = pd.DataFrame(columns=result_columns)
if options.bug_index:
bug = bug_df.loc[int(options.bug_index)]
bug_id = int(options.bug_index)
else:
bug = bug_df.loc[bug_id]
# update df after a bug is resolved
df = pd.read_csv(init_dir)
df = df[columns]
# initialize causal model object
CM = CausalModel(columns)
g = DiGraph()
g.add_nodes_from(columns)
# edge constraints
tabu_edges = CM.get_tabu_edges(columns, conf_opt, options.obj)
G, di_edges, bi_edges = run_unicorn_loop(CM, df,
tabu_edges, columns, options,
NUM_PATHS)
g.add_edges_from(di_edges + bi_edges)
bug_exists = True
print("--------------------------------------------------")
print("BUG ID: ", bug_id)
print("--------------------------------------------------")
it = 0
previous_config = bug[conf_opt].copy()
while bug_exists:
# identify causal paths
paths = CM.get_causal_paths(columns, di_edges, bi_edges,
options.obj)
# compute causal paths
if len(options.obj) < 2:
# single objective faults
for key, val in paths.items():
if len(paths[key]) > NUM_PATHS:
paths = CM.compute_path_causal_effect(df, paths[key], G,
NUM_PATHS)
else:
paths = paths[options.obj[0]]
# compute individual treatment effect in a path
print(paths)
config = CM.compute_individual_treatment_effect(df, paths, g,
query, options, bug[options.obj[0]],
previous_config, cfg, var_types)
else:
# multi objective faults
paths = paths[options.obj[0]]
# compute individual treatment effect in a path
config = CM.compute_individual_treatment_effect(df, paths, g,
query, options, bug[options.obj],
previous_config, cfg, var_types)
# perform intervention. This updates the init_data
if config is not None:
if options.mode == "offline":
curm = m[options.hardware][options.software][options.obj[0]][str(
bug_id)][str(it)]["measurement"]
if curm < (1 - query) * bug[options.obj[0]]:
bug_exists = False
print("--------------------------------------------------")
print("+++++++++++++++Recommended Fix++++++++++++++++++++")
print(config)
print("Unicorn Fix Value", curm)
print("Number of Samples Required", str(it))
print("--------------------------------------------------")
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print(bug[conf_opt])
print("Bug Objective Value", int(bug[options.obj[0]]))
print("--------------------------------------------------")
config = config.tolist()
config.extend([curm])
config = pd.DataFrame([config])
config.columns = result_columns
result_df = pd.concat([result_df, config], axis=0)
result_df = result_df[result_columns]
result_df["bug_id"] = bug_id
result_df["method"] = "Unicorn"
result_df["num_samples"]=it
result_df["gain"]= ((bug[options.obj[0]]-curm)/bug[options.obj[0]])*100
if options.bug_index is None:
if bug_id == 0:
result_df.to_csv(measurement_dir,index=False)
else:
result_df.to_csv(measurement_dir,index=False, header=False,mode="a")
else:
curc = m[options.hardware][options.software][options.obj[0]][str(
bug_id)][str(it)]["conf"]
print("--------------------------------------------------")
print("+++++++++++++++++++++Bug++++++++++++++++++++++++++")
print("Recommended Config Objective Value", curm)
print("--------------------------------------------------")
it += 1
config = config.tolist()
config.extend(curc)
config.extend([curm])
config = | pd.DataFrame([config]) | pandas.DataFrame |
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import requests
import spotipy
from typing import List
from os import listdir
import json
import sys
from tqdm import tqdm
"""
Credentials to :
https://towardsdatascience.com/get-your-spotify-streaming-history-with-python-d5a208bbcbd3
Spotify data places on the folder MyData
"""
# python my_spotify_history_enrichment.py "MyData/User3" "ProcessedData/User3"
if len(sys.argv) >= 3:
folder_path = sys.argv[1]
target_path = sys.argv[2]
else:
folder_path = 'MyData/User3'
target_path = 'ProcessedData/User3'
def get_streamings(path: str = folder_path) -> List[dict]:
files = [path + '/' + x for x in listdir(path)
if x.split('.')[0][:-1] == 'StreamingHistory']
all_streamings = []
for file in files:
with open(file, 'r', encoding='utf-8') as f:
all_streamings = json.load(f)
return all_streamings
# Replace with your own Spotify developer credentials
with open("my_spotify_dev_account.json", 'r', encoding='utf-8') as f:
    my_spotify_dev_account = json.load(f)
token = spotipy.util.prompt_for_user_token(username=my_spotify_dev_account["username"],
scope=my_spotify_dev_account["scope"],
client_id=my_spotify_dev_account["client_id"],
client_secret=my_spotify_dev_account["client_secret"],
redirect_uri=my_spotify_dev_account["redirect_uri"])
print("TOKEN : ", token)
def get_id(track_name: str, token: str) -> str:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token
}
params = [('q', track_name), ('type', 'track')]
try:
response = requests.get('https://api.spotify.com/v1/search',
headers=headers, params=params, timeout=5)
json = response.json()
first_result = json['tracks']['items'][0]
track_id = first_result['id']
track_artist = []
for artist in first_result['artists']:
track_artist.append(artist["name"])
track_album = first_result["album"]["name"]
popularity = first_result["popularity"]
return [track_id, track_artist, track_album, popularity]
except:
return [None, None, None, None]
def get_user_features(track_id: str, token: str) -> dict:
sp = spotipy.Spotify(auth=token)
try:
features = sp.audio_features([track_id])
return features[0]
except:
return None
def get_recommendations(track_names, token):
headers = {
        'Authorization': 'Bearer ' + token
}
params = [('seed_tracks', ",".join(track_names)),
('seed_artists', ",".join([])), ('seed_genres', ",".join([]))]
try:
response = requests.get('https://api.spotify.com/v1/recommendations',
headers=headers, params=params, timeout=5)
json = response.json()
recommendations = []
for track in json["tracks"]:
recommendations.append(track["id"])
return recommendations
except:
return None
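

# Minimal sketch tying the helpers above together for one seed track; the seed id
# below is a placeholder, not a real Spotify track id.
def _example_recommendations(seed_track_id="PLACEHOLDER_TRACK_ID"):
    rec_ids = get_recommendations([seed_track_id], token) or []
    return {rec_id: get_user_features(rec_id, token) for rec_id in rec_ids}
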
streamings = get_streamings(folder_path)
unique_tracks = list(set([streaming['trackName']
for streaming in streamings]))
print("Getting all listened songs features")
all_features = {}
all_recommendations = {}
for i in tqdm(range(len(unique_tracks))):
track = unique_tracks[i]
[track_id, track_artist, track_album, popularity] = get_id(track, token)
features = get_user_features(track_id, token)
if features:
features["id"] = track_id
features["artist"] = track_artist
features["album"] = track_album
features["popularity"] = popularity
all_features[track] = features
all_recommendations[track_id] = get_recommendations([track_id], token)
with open(target_path + '/track_data.json', 'w') as outfile:
json.dump(all_features, outfile)
def get_songs_features(track_id: str, token: str) -> dict:
sp = spotipy.Spotify(auth=token)
try:
features = sp.audio_features([track_id])[0]
track_features = sp.track(track_id)
features["name"] = track_features["name"]
features["popularity"] = track_features["popularity"]
features["artists"] = [track_features["artists"][k]["name"]
for k in range(len(track_features["artists"]))]
features["album"] = track_features["album"]["name"]
return features
except:
return None
print("Getting all recommended songs")
unique_ids = []
for i in tqdm(range(len(list(all_recommendations.keys())))):
ids = list(all_recommendations.keys())[i]
for id_reco in all_recommendations[ids]:
if id_reco not in unique_ids:
unique_ids.append(id_reco)
print("Getting all features from recommended songs")
all_features = {}
for i in tqdm(range(len(unique_ids))):
track_id = unique_ids[i]
features = get_songs_features(track_id, token)
if features:
all_features[track_id] = features
with open(target_path + '/recommendations_songs.json', 'w') as outfile:
json.dump(all_features, outfile)
print("Data processing")
recommendations_data = pd.read_json(
target_path + '/recommendations_songs.json').T
numerical_features = ["danceability", "energy", "loudness", "speechiness",
"acousticness", "instrumentalness", "liveness", "valence", "tempo", "popularity"]
other_features = ["mode", "key", "id", "duration_ms",
"artists", "name", "id", "artists", "album"]
numerical_recommendations_data = recommendations_data[numerical_features]
track_np = np.array(numerical_recommendations_data.values)
print("Data normalization")
minMaxScaler = MinMaxScaler().fit(track_np)
track_np = minMaxScaler.transform(track_np)
track_data_normalized = pd.DataFrame(
track_np, columns=numerical_features, index=recommendations_data.index)
for feature in other_features:
track_data_normalized[feature] = recommendations_data[feature]
print("Data clustering")
n_clusters = 4
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(track_np)
kmeans_cluster_labels = kmeans.labels_
kmeans_cluster_centers = kmeans.cluster_centers_
track_data_normalized["kmeans_cluster"] = kmeans_cluster_labels
# sub-clusterisation : each cluster is divided into small clusters
n_sub_clusters = 10
track_data_normalized["kmeans_subcluster"] = 0
for i in range(n_clusters):
num_track_data_np = np.array(
track_data_normalized[track_data_normalized["kmeans_cluster"] == i][numerical_features])
subkmeans = KMeans(n_clusters=n_sub_clusters,
random_state=0).fit(num_track_data_np)
track_data_normalized.loc[track_data_normalized["kmeans_cluster"]
== i, "kmeans_subcluster"] = subkmeans.labels_
print("Dimension reduction with PCA")
pca = PCA(n_components=2)
pca.fit(track_np)
track_np_pca = pca.transform(track_np)
track_data_normalized["x"] = track_np_pca[:, 0]
track_data_normalized["y"] = track_np_pca[:, 1]
cluster_centers_pca = pca.transform(kmeans_cluster_centers)
def comput_dist(x, y, cluster):
return 1000*np.sqrt((x-cluster_centers_pca[cluster][0])**2 + (y-cluster_centers_pca[cluster][1])**2)
track_data_normalized["dist"] = track_data_normalized.apply(
lambda x: comput_dist(x['x'], x['y'], x['kmeans_cluster']), axis=1)
songs_df = track_data_normalized[["name", "kmeans_cluster", "kmeans_subcluster",
"x", "y", "dist", "id", "artists", "album"]+numerical_features]
songs_df.T.to_json(target_path+'/songs_json.json')
track_data = pd.read_json(target_path+'/track_data.json')
track_data = track_data.loc[["danceability", "energy", "loudness", "speechiness", "acousticness",
"instrumentalness", "liveness", "valence", "tempo", "duration_ms", "popularity", "artist", "album", "id"]].T
track_np_user = np.array(track_data[numerical_features].values)
track_np_user = minMaxScaler.transform(track_np_user)
track_data_normalized = pd.DataFrame(
track_np_user, columns=numerical_features, index=track_data.index)
track_data_normalized["artist"] = track_data["artist"]
track_data_normalized["album"] = track_data["album"]
track_data_normalized["id"] = track_data["id"]
track_data_normalized["name"] = track_data.index
track_data_normalized["duration_ms"] = track_data["duration_ms"]
files = [folder_path + '/' + x for x in listdir(folder_path)
if x.split('.')[0][:-1] == 'StreamingHistory']
streaming_history_df = pd.read_json(files[0])
if len(files) > 1:
for file_path in files[1:]:
streaming_history_df = pd.concat(
(streaming_history_df, | pd.read_json(file_path) | pandas.read_json |
# Long Author List formatting tool
# <NAME> (<EMAIL> 2020)
# Usage: python3 lal.py
# Input: lal_data2.txt with one author per row and up to 5 affiliations
# <First>;<Last>;<Email>;<Group1>;<Group2>;<Group3>;<Group4>;<Group5>
# Example: Heiko;Goelzer;<EMAIL>;IMAU,UU;ULB;nil;nil;nil
# Use 'nil','nan','0' or '-' to fill unused affiliations
# Output: lal_inout2.txt when saving the modified listing, can be used as
# input the next time
# Parsed: lal_parsed_word.txt / lal_parsed_tex.txt when parsed to insert in a manuscript
# Selected lines and selected blocks can be rearranged by dragging, sorted by last name and deleted.
# 'Save' will write the updated list to a file that can be reused later
# 'Parse' will write formatted output that can be copy-pasted
import tkinter as tk
import pandas as pd  # used by the DataFrame-based save/parse methods below
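

# Minimal sketch of how the semicolon-separated author list could be loaded into
# the DataFrame the methods below expect (column names inferred from their usage).
def _example_load(path='lal_data2.txt'):
    return pd.read_csv(path, sep=';', header=None,
                       names=['FirstName', 'LastName', 'Email',
                              'Group1', 'Group2', 'Group3', 'Group4', 'Group5'])
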
# Listbox for ordering
class ReorderableListbox(tk.Listbox):
""" A Tkinter listbox with drag & drop reordering of lines """
def __init__(self, master, **kw):
kw['selectmode'] = tk.EXTENDED
tk.Listbox.__init__(self, master, kw)
self.bind('<Button-1>', self.setCurrent)
self.bind('<Control-1>', self.toggleSelection)
self.bind('<B1-Motion>', self.shiftSelection)
self.bind('<Leave>', self.onLeave)
self.bind('<Enter>', self.onEnter)
self.selectionClicked = False
self.left = False
self.unlockShifting()
self.ctrlClicked = False
def orderChangedEventHandler(self):
pass
def onLeave(self, event):
# prevents changing selection when dragging
# already selected items beyond the edge of the listbox
if self.selectionClicked:
self.left = True
return 'break'
def onEnter(self, event):
#TODO
self.left = False
def setCurrent(self, event):
self.ctrlClicked = False
i = self.nearest(event.y)
self.selectionClicked = self.selection_includes(i)
if (self.selectionClicked):
return 'break'
def toggleSelection(self, event):
self.ctrlClicked = True
def moveElement(self, source, target):
if not self.ctrlClicked:
element = self.get(source)
self.delete(source)
self.insert(target, element)
def unlockShifting(self):
self.shifting = False
def lockShifting(self):
# prevent moving processes from disturbing each other
# and prevent scrolling too fast
# when dragged to the top/bottom of visible area
self.shifting = True
def shiftSelection(self, event):
if self.ctrlClicked:
return
selection = self.curselection()
if not self.selectionClicked or len(selection) == 0:
return
selectionRange = range(min(selection), max(selection))
currentIndex = self.nearest(event.y)
if self.shifting:
return 'break'
lineHeight = 12
bottomY = self.winfo_height()
if event.y >= bottomY - lineHeight:
self.lockShifting()
self.see(self.nearest(bottomY - lineHeight) + 1)
self.master.after(500, self.unlockShifting)
if event.y <= lineHeight:
self.lockShifting()
self.see(self.nearest(lineHeight) - 1)
self.master.after(500, self.unlockShifting)
if currentIndex < min(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange[::-1]:
if not self.selection_includes(i):
self.moveElement(i, max(selection)-notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = min(selection)-1
self.moveElement(currentIndex, currentIndex + len(selection))
self.orderChangedEventHandler()
elif currentIndex > max(selection):
self.lockShifting()
notInSelectionIndex = 0
for i in selectionRange:
if not self.selection_includes(i):
self.moveElement(i, min(selection)+notInSelectionIndex)
notInSelectionIndex += 1
currentIndex = max(selection)+1
self.moveElement(currentIndex, currentIndex - len(selection))
self.orderChangedEventHandler()
self.unlockShifting()
return 'break'
def deleteSelection(self):
# delete selected items
if len(self.curselection()) == 0:
return
self.delete(min(self.curselection()),max(self.curselection()))
def sortAll(self):
# sort all items alphabetically
temp_list = list(self.get(0, tk.END))
temp_list.sort(key=str.lower)
# delete contents of present listbox
self.delete(0, tk.END)
# load listbox with sorted data
for item in temp_list:
self.insert(tk.END, item)
def sortSelection(self):
# sort selected items alphabetically
if len(self.curselection()) == 0:
return
mmax = max(self.curselection())
mmin = min(self.curselection())
temp_list = list(self.get(mmin,mmax))
#print(temp_list)
# Sort reverse because pushed back in reverse order
temp_list.sort(key=str.lower,reverse=True)
# delete contents of present listbox
self.delete(mmin,mmax)
# load listbox with sorted data
for item in temp_list:
self.insert(mmin, item)
def save(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
matche = (df["Email"].isin([items[2]]))
dfout = dfout.append(df[matchf & matchl])
dfout.to_csv('lal_inout2.txt', sep=';', header=None, index=None)
print("File saved!")
def parse_word(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
grp = dfout[["Group1","Group2","Group3","Group4","Group5"]]
unique_groups = []
group_ids = []
k = 0
# collect unique groups and indices
for i in range(0,dfout.shape[0]):
groups = []
# loop through max 5 groups
for j in range(0,5):
# Exclude some common dummy place holders
if (grp.iloc[i,j] not in ['nil','nan','0','-']):
if (grp.iloc[i,j] not in unique_groups):
unique_groups.append(grp.iloc[i,j])
k = k + 1
groups.append(k)
else:
ix = unique_groups.index(grp.iloc[i,j])+1
groups.append(ix)
# Add author group ids
group_ids.append(groups)
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_word.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print(first.iloc[i].strip(), end =" ", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
for j in range(0,len(group_ids[i])):
if j < len(group_ids[i])-1:
print(str(group_ids[i][j]), end =",", file=text_file)
else:
print(str(group_ids[i][j]), end ="", file=text_file)
#print(" ", end ="", file=text_file)
if (i < dfout.shape[0]-1):
# comma and space before next name
print(", ", end ="", file=text_file)
# Add some space between names and affiliations
print("\n\n", file=text_file)
# Write out affiliations
for i in range(0,len(unique_groups)):
print("(", end ="", file=text_file)
print(str(i+1), end ="", file=text_file)
print(")", end =" ", file=text_file)
print(unique_groups[i], end ="\n", file=text_file)
print("File lal_parsed_word.txt written")
# Parse tex \author and \affil
def parse_tex(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = pd.DataFrame()
for item in temp_list:
items = item.split(",")
matchl = (df["LastName"].isin([items[0]]))
matchf = (df["FirstName"].isin([items[1]]))
dfout = dfout.append(df[matchf & matchl])
# parse
first = dfout["FirstName"]
last = dfout["LastName"]
grp = dfout[["Group1","Group2","Group3","Group4","Group5"]]
unique_groups = []
group_ids = []
k = 0
# collect unique groups and indices
for i in range(0,dfout.shape[0]):
groups = []
# loop through max 5 groups
for j in range(0,5):
# Exclude some common dummy place holders
if (grp.iloc[i,j] not in ['nil','nan','0','-']):
if (grp.iloc[i,j] not in unique_groups):
unique_groups.append(grp.iloc[i,j])
k = k + 1
groups.append(k)
else:
ix = unique_groups.index(grp.iloc[i,j])+1
groups.append(ix)
# Add author group ids
group_ids.append(groups)
#print(group_ids)
#print(unique_groups)
# Compose text
with open("lal_parsed_tex.txt", "w") as text_file:
# write out names
for i in range(0,dfout.shape[0]):
print("\\Author[", end ="", file=text_file)
for j in range(0,len(group_ids[i])):
if j < len(group_ids[i])-1:
print(str(group_ids[i][j]), end =",", file=text_file)
else:
print(str(group_ids[i][j]), end ="]", file=text_file)
print("{", end ="", file=text_file)
print(first.iloc[i].strip(), end ="", file=text_file)
print("}{", end ="", file=text_file)
print(last.iloc[i].strip(), end ="", file=text_file)
print("}", end ="\n", file=text_file)
# Add some space between names and affiliations
print("\n", file=text_file)
# Write out affiliations
for i in range(0,len(unique_groups)):
print("\\affil", end ="", file=text_file)
print("[", end ="", file=text_file)
print(str(i+1), end ="", file=text_file)
print("]", end ="", file=text_file)
print("{", end ="", file=text_file)
print(unique_groups[i], end ="}\n", file=text_file)
print("File lal_parsed_tex.txt written")
# Parse simple list of names
def parse_list(self,df):
# save current list
temp_list = list(self.get(0, tk.END))
# create output df
dfout = | pd.DataFrame() | pandas.DataFrame |
import nltk
import sklearn_crfsuite
from sklearn_crfsuite import metrics
import pandas as pd
from sklearn.preprocessing import label_binarize
import string
# nltk.download('conll2002')
flatten = lambda l: [item for sublist in l for item in sublist]
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import os
import sys
from sklearn.preprocessing import LabelEncoder
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
import argparse
import matplotlib.cm as cm
import codecs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
# nltk.corpus.conll2002.fileids()
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from gensim.models.word2vec import Word2Vec
import gensim
import random
from collections import OrderedDict
from sklearn.model_selection import KFold
# classifier information
from keras.layers import Dropout, Dense
from keras.models import Sequential
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
LabeledSentence = gensim.models.doc2vec.LabeledSentence
import hdbscan
# classifier information
from keras.layers import Input
from keras.models import Model
from sklearn.cluster import MiniBatchKMeans
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def model_ae(X_train,x_test,n=300,encoding_dim=32):
# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/
# r program
# this is our input placeholder
input = Input(shape=(n,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(n, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input, decoded)
# this model maps an input to its encoded representation
encoder = Model(input, encoded)
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, X_train,
epochs=20,
batch_size=32,
shuffle=True,
validation_data=(x_test, x_test))
return encoder
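

# Minimal sketch of using the autoencoder above for dimensionality reduction on a
# random 300-dimensional embedding (the shapes here are illustrative assumptions).
def _example_autoencoder_reduction():
    rng = np.random.RandomState(0)
    vecs = rng.rand(500, 300)
    X_train, x_test = train_test_split(vecs, test_size=0.2, random_state=0)
    encoder = model_ae(X_train, x_test, n=300, encoding_dim=32)
    return encoder.predict(vecs)  # reduced representation, shape (500, 32)
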
def call_silhout_(X,df,range_n_clusters):
hyper_parm_turning=OrderedDict()
for n_clusters in range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
# clusterer = MiniBatchKMeans(n_clusters=n_clusters,init='k-means++', random_state=10)
from sklearn.mixture import GaussianMixture
# Predict GMM cluster membership
clusterer = GaussianMixture(n_components=n_clusters, random_state=10)
# from sklearn.cluster import AgglomerativeClustering
# clusterer = AgglomerativeClustering(n_clusters=n_clusters)
cluster_labels = clusterer.fit_predict(X)
labels="cluster_labels_{}".format(n_clusters)
if not labels in df.keys():
df[labels]=cluster_labels
sample_dist_std=np.std(df.groupby(labels).size())
sample_dist_avrg=np.median(df.groupby(labels).size())
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
if not 'n_clusters' in hyper_parm_turning.keys():
hyper_parm_turning['n_clusters']=[n_clusters]
else:
hyper_parm_turning['n_clusters'].append(n_clusters)
if not 'silhouette_avg' in hyper_parm_turning.keys():
hyper_parm_turning['silhouette_avg']=[silhouette_avg]
else:
hyper_parm_turning['silhouette_avg'].append(silhouette_avg)
if not 'sample_dist_std' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_std']=[sample_dist_std]
else:
hyper_parm_turning['sample_dist_std'].append(sample_dist_std)
if not 'sample_dist_avrg' in hyper_parm_turning.keys():
hyper_parm_turning['sample_dist_avrg']=[sample_dist_avrg]
else:
hyper_parm_turning['sample_dist_avrg'].append(sample_dist_avrg)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
return df,hyper_parm_turning
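

# Minimal sketch: run the silhouette sweep above on synthetic data and inspect the
# collected scores (purely for illustration).
def _example_silhouette_sweep():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 8)
    df = pd.DataFrame(index=range(200))
    df_labeled, scores = call_silhout_(X, df, range_n_clusters=[2, 3, 4])
    return pd.DataFrame(scores)
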
def main():
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("input_file", help="The input file to be projected")
# parser.add_argument("speech_feats_file", help="The input file to be projected")
# parser.add_argument("out_path_file", help="The input file to be projected")
args = parser.parse_args()
df_=pd.read_csv(args.input_file)
# print(df_.head())
df_doc2vec=df_.copy()
df_doc2vec=df_doc2vec.drop(['utterance'], axis=1)
# print(df_doc2vec.columns.to_list())
# df_['sentence_label']=sentence_emotion_labeling
df_doc2vec = df_doc2vec[df_doc2vec.columns[:300]]
print('loading the database')
# print(df_doc2vec.head())
print(df_doc2vec.shape)
from sklearn.preprocessing import scale
train_vecs = scale(df_doc2vec)
print('scaling the data')
#using pca as dimension reduction technique
PCA_model = PCA(.90, random_state=42)
X_standard = PCA_model.fit_transform(train_vecs)*(-1)
print(X_standard.shape)
# Single VD
# from numpy import array
# from sklearn.decomposition import TruncatedSVD
# TruncatedSVD_model=TruncatedSVD(n_components=3)
# X_standard = TruncatedSVD_model.fit_transform(train_vecs)
# using T-distributed Stochastic Neighbor Embedding (T-SNE)
# from sklearn.manifold import TSNE
# X_standard = TSNE(n_components=3).fit_transform(train_vecs)
# from sklearn.decomposition import NMF
# NMF_model=NMF(n_components=3)
# X_standard = NMF_model.fit_transform(train_vecs)
# from sklearn import random_projection
# X_standard = random_projection.GaussianRandomProjection(n_components=2).fit_transform(X_standard)
# X_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)
# encodeing=model_ae(X_train,x_test)
# X_standard=scale(encodeing.predict(train_vecs))
# print(X_standard)
# print(PCA_model.explained_variance_ratio_)
# print(TruncatedSVD_model.explained_variance_ratio_)
# print(NMF_model.explained_variance_ratio_)
# clustering
range_n_clusters =np.arange(20,22,+1)
# # print(df_.shape)
X_labeled,hyper_parm_turning=call_silhout_(X_standard,df_,range_n_clusters)
# print(X_labeled.head())
X_labeled['utterance']=df_.index.to_list()
# # X_labeled['sentence_label']=sentence_emotion_labeling
cluster_='cluster_labels_20'
# cluster_labeling=X_labeled[['utterance','sentence_label',cluster_]].groupby(cluster_).size()
cluster_labeling=X_labeled[['utterance',cluster_]].groupby(cluster_).size()
print(cluster_labeling)
hyper_parm_turning= | pd.DataFrame(hyper_parm_turning) | pandas.DataFrame |
import pandas as pd
import numpy as np
__all__=['xgb_parse']
def _xgb_tree_leaf_parse(xgbtree,nodeid_leaf):
    '''Given a set of leaf nodes, trace the decision paths through the xgbtree back to the root.
    '''
leaf_ind=list(nodeid_leaf)
result=xgbtree.loc[(xgbtree.ID.isin(leaf_ind)),:]
result['Tag']='Leaf'
node_id=list(result.ID)
while len(node_id)>0:
        tmp1 = xgbtree.loc[xgbtree.Yes.isin(node_id), :].copy()
        tmp2 = xgbtree.loc[xgbtree.No.isin(node_id), :].copy()
        tmp1['Tag'] = 'Yes'
        tmp2['Tag'] = 'No'
node_id=list(tmp1.ID)+list(tmp2.ID)
result=pd.concat([result,tmp1,tmp2],axis=0)
return result
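

# Minimal sketch, assuming `xgbtree` is the frame returned by
# Booster.trees_to_dataframe(), which provides the ID / Yes / No columns used above.
def _example_leaf_parse(model, leaf_ids):
    xgbtree = model.get_booster().trees_to_dataframe()
    return _xgb_tree_leaf_parse(xgbtree, leaf_ids)
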
def xgb_parse(model,feature=None):
    '''Given a model and a single sample, return the xgbtree decision paths for that
    sample together with the sample's feature importances.
    '''
feature_names=model.get_booster().feature_names
#missing_value=model.get_params()['missing']
f0= | pd.DataFrame({'GainTotal':model.feature_importances_,'Feature':feature_names}) | pandas.DataFrame |
from urllib.request import urlretrieve
import pandas as pd
import os
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename = "fremont.csv", url=FREMONT_URL, force_download=False):
'''
    Download and prepare the Fremont Bridge bicycle-count data:
    a) download the CSV from the given URL (cached locally as `filename`)
    b) parse the 'Date' column into a pandas DatetimeIndex
    c) rename the columns to ['Total', 'East', 'West'] for convenience
------------
Parameters:
------------
filename: string (optional)
location to save the data
url: string (optional)
web location of the data
force_download: bool (optional)
if True, force redownload of data
------------
Returns
------------
data : pandas.DataFrame
The fremont bridge data contains passing bike data.
'''
if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)  # cache under the requested filename
    data = pd.read_csv(filename, index_col='Date')
try:
data.index=pd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p') # check it here: https://strftime.org/
except TypeError:
data.index= | pd.to_datetime(data.index) | pandas.to_datetime |
import pandas as pd
import requests
import datetime
import numpy as np
from numpy import array
import matplotlib.pyplot as plt
from numpy import hstack
import seaborn as sns
import random
from functools import reduce
from keras.models import load_model
from keras.models import Sequential
from keras.layers import LSTM,Bidirectional,Activation
from keras import optimizers
from keras.layers import Dense,Dropout
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from statsmodels.tsa.seasonal import STL
from sklearn.cluster import AgglomerativeClustering,KMeans
import logging
def cluster_model(self,entry_info,repo_id,df):
data = | pd.read_csv("repo_reviews_all.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexinst in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) as input; `empty` makes sized
        # objects (e.g. np.ones or ma.masked_all)
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # all entries are masked (NaN), so every element-wise self-comparison is False
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
        assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = | Index(['a', 'b']) | pandas.Index |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# remove_cell
import sys
sys.path.insert(0, '/home/jovyan/ros/')
# %load_ext autoreload
# %autoreload 2
# +
# remove_cell
import itertools as it
import logging
import operator
import altair as A
import arviz as az
from bambi import Model
import dscontrib.wbeard as wb
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as nr
import pandas as pd
import scipy.stats as st
from numba import njit
import toolz.curried as z
import seaborn as sns
from ros.utils.common import hstack, vstack, plot_wrap, drop_outliers, disable_logging
from ros.utils import bootstrap as bs, plot as plu
disable_logging(["numba", "arviz", "pymc3", "bambi", "numexpr"]) #
str_concat = z.compose("-".join, z.map(str))
lmap = z.comp(list, map)
plt.rcParams["font.size"] = 17
p = lambda: None
p.__dict__.update(
dict(
zip(
"hide_output hide_input collapse_hide collapse_show remove_cell".split(),
range(10),
)
)
)
# -
# At Mozilla we spend a fair amount of time thinking about the interplay between sample size, effect size, and an experiment's sensitivity to detect changes. All else equal, it's usually preferable to enroll a smaller sample, so long as it's sufficient to pick up the signal of the treatment. Among other reasons, this reduces the likelihood of different experiments interacting with each other. But there are ways to increase the resolution of experimental analysis without increasing the population size, and this efficient frontier is a good place to aim for.
#
# Some techniques I've tried lately are using [blocking](https://en.wikipedia.org/wiki/Blocking_(statistics)) and pre-treatment predictors as useful ways to get more precise estimates for free, without the need of a larger study population. This post simulates experimental data to demonstrate the improvement in precision that you can get with these.
# ## The Setup
#
# The example here is a study that measures an improvement in startup times of a new feature. This is a metric we pay quite a bit of attention to in the platform area, and are obviously interested in features that can reduce startup times. The study population in this example has a distribution of startup times, but on average Windows 7 users have longer times than Windows 10 users.[1]
#
# The basic idea with blocking is that if 2 groups in the population have significantly different outcomes, independent of the treatment variable, you can get a more precise estimate of the treatment effect by modeling these groups separately.
# Intuitively, if the 2 groups have significantly different outcomes even before the treatment is applied, this difference will contribute to a higher variance in the estimate when it comes time to measure the size of the treatment effect. The variable that determines the grouping needs to be independent of the treatment assignment, so using Windows 7 as a blocking factor would be a good choice, as our feature doesn't do anything preposterous like upgrade the OS once the client enrolls.
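#
# As a quick illustration of that variance intuition, here's a minimal sketch. The block effect, treatment effect and noise level below are made-up numbers for this aside (not the simulation parameters used later in the post); it just compares the standard error of a pooled difference-in-means with a blocked version computed from the same data.
# +
# collapse_show
rng = nr.RandomState(0)
n_toy = 2_000
toy_block = rng.randint(0, 2, size=n_toy)  # e.g. 1 = win7, 0 = win10
toy_treat = rng.randint(0, 2, size=n_toy)
toy_y = 1.2 * toy_block - 0.1 * toy_treat + rng.randn(n_toy) * 0.3
def _diff_se(y, treat):
    "Standard error of the difference in means between treatment and control."
    a, b = y[treat == 1], y[treat == 0]
    return np.sqrt(a.var(ddof=1) / len(a) + b.var(ddof=1) / len(b))
pooled_se = _diff_se(toy_y, toy_treat)
# equal-weight average of the two within-block difference-in-means estimates
blocked_se = 0.5 * np.sqrt(
    sum(_diff_se(toy_y[toy_block == b], toy_treat[toy_block == b]) ** 2
        for b in (0, 1))
)
print(f"pooled SE: {pooled_se:.3f}  blocked SE: {blocked_se:.3f}")
# -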
#
# The second idea is to use a pre-treatment variable as a predictor. In this case, it means looking at the startup time before enrollment, and seeing how much this changes on average for the treatment group once they get the feature. This works if a
# client's pre-treatment startup time $t_{pre}$ is more informative of the post-treatment startup time $t_{post}$ than merely knowing the OS version, and it's safe to assume here that $t_{post}$ and the OS are conditionally independent given $t_{pre}$.
#
# As with many metrics we use, the log of the startup time more closely follows the distributions we're used to. For this simulation we'll set the log of the first_paint time to follow a gamma distribution, with the mean time increased for Windows 7 users.[2] For users in the treatment group, we'll add a noisy log(.9) (=-.105) to the distribution, which translates to roughly a 10% decrease in startup times on the linear scale.[3] After the simulation, we'll look at how much of an improvement you get with the estimates when using a randomized block design. The formulas describing the simulation are
#
#
# \begin{align}
# fp_{baseline} & \sim \mathrm{Gamma}(4 + \mathbb 1_{win7} \cdot \mu_{win}) \\
# w_{treat} & \sim \mathcal N (\mathbb 1_{treat} \cdot \mu_{treat}, \sigma_{treat}) \\
# \log(first\_paint) & = fp_{baseline} + w_{treat}
# \end{align}
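#
# A quick sanity check on the "roughly 10%" figure (a sketch that only assumes numpy): the treatment shift is applied on the log scale, so on the linear scale it acts as a multiplier with median $e^{\log 0.9} = 0.90$ and mean $e^{\log 0.9 + \sigma_{treat}^2/2} \approx 0.91$.
# +
# collapse_show
toy_shift = np.exp(np.log(.9) + nr.randn(100_000) * .15)
print(f"median multiplier: {np.median(toy_shift):.3f}  mean multiplier: {toy_shift.mean():.3f}")
# -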
# +
# hide_input
@njit
def seed(n):
nr.seed(n)
@njit
def randn(mu, sig, size=1):
return nr.randn(size) * sig + mu
# +
# collapse_show
WIN7_FACT = 1.2
TREAT_MU = np.log(.9)
TREAT_SD = .15
@njit
def gen_log_first_paint_pre_post(win7, treat, size=1):
pre = nr.gamma(4 + WIN7_FACT * win7, 1, size=size)
return np.concatenate((pre, pre + randn(TREAT_MU * treat, TREAT_SD, size=size)))
# +
# collapse_hide
n_each = 10_000
n_win_7 = {0: n_each, 1: n_each}
seed(0)
def add_columns(df):
pre_post = pd.DataFrame(
[
gen_log_first_paint_pre_post(win7, treat=treat)
# gen_pre_post(win7, win7_fact=WIN7_FACT, treat=treat, treat_fact=TREAT_FACT,)
for win7, treat in df[["win7", "treat"]].itertuples(index=False)
],
columns=["lpre", "lpost"],
).assign(
pre=lambda df: np.exp(df.lpre),
post=lambda df: np.exp(df.lpost),
)
df = hstack([df, pre_post])
df = (
df.assign(os=lambda df: df.win7.map({0: "win10", 1: "win7"}))
.reset_index(drop=0)
.rename(columns={"index": "id"})
)
df["demo"] = [
str_concat(tup)
for tup in df[["treat", "os"]]
.assign(treat=lambda df: df.treat.map({1: "treat", 0: "control"}))
.itertuples(index=False)
]
return df
def create_test_pop(n_each=50):
data_dct = [
{"win7": win7, "treat": treat}
for win7 in (0, 1)
for treat in (0, 1)
for _ in range(n_win_7[win7])
]
df_ = | pd.DataFrame(data_dct) | pandas.DataFrame |
import numpy as np
import random
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import queue
import collections
import pandas as pd
INPUT_FILE="blobs.txt"
ITERATIONS=10
# Define labels for the different point groups
NOISE = 0
UNASSIGNED = 0
core=-1
edge=-2
dataset = []
def read_dataset():
"""
Reading dataset
"""
global INPUT_FILE, dataset
f = open(INPUT_FILE, "r")
lines = f.readlines()
for i in range(len(lines)):
data = lines[i].split()
dataset.append(list(map(float, data)))
# print(data)
f.close()
pass
def find_nearest_neighbour(k):
"""
    Plot the sorted k-nearest-neighbour distances (used to choose Eps via the elbow method)
"""
global dataset
nearest_neighbors = NearestNeighbors(n_neighbors=k)
nearest_neighbors.fit(dataset)
distances, indices = nearest_neighbors.kneighbors(dataset)
distances = np.sort(distances, axis=0)[:, 1]
# print(distances, indices)
plt.plot(distances)
plt.savefig('Nearest_Neighbour.png')
# plt.show()
def dist(point1, point2):
"""Euclid distance function"""
x1 = point1[0]
x2 = point2[0]
y1 = point1[1]
y2 = point2[1]
    # squared differences along each coordinate
p1 = (x1 - x2)**2
p2 = (y1 - y2)**2
return np.sqrt(p1 + p2)
# Function to find all neighbor points within the given radius
def neighbor_points(data, pointIdx, radius):
points = []
for i in range(len(data)):
        # Euclidean distance (L2 norm)
# if np.linalg.norm(data[i] - data[pointIdx]) <= radius:
if dist(data[i], data[pointIdx]) <= radius:
points.append(i)
return points
# DBSCAN algorithm
def dbscan(data, Eps, MinPt):
'''
- Eliminate noise points
- Perform clustering on the remaining points
> Put an edge between all core points which are within Eps
> Make each group of core points as a cluster
> Assign border point to one of the clusters of its associated core points
'''
    # Initialize all point labels to UNASSIGNED
pointlabel = [UNASSIGNED] * len(data)
pointcount = []
    # Initialize lists for core / non-core points
corepoint=[]
noncore=[]
    # Find the neighbors of every point
for i in range(len(data)):
pointcount.append(neighbor_points(dataset,i,Eps))
    # Classify every point as core, edge or noise
for i in range(len(pointcount)):
if (len(pointcount[i])>=MinPt):
pointlabel[i]=core
corepoint.append(i)
else:
noncore.append(i)
for i in noncore:
for j in pointcount[i]:
if j in corepoint:
pointlabel[i]=edge
break
    # Start assigning points to clusters
cl = 1
    # Use a queue of neighboring core points to grow each cluster through its neighbors' neighbors
for i in range(len(pointlabel)):
q = queue.Queue()
if (pointlabel[i] == core):
pointlabel[i] = cl
for x in pointcount[i]:
if(pointlabel[x]==core):
q.put(x)
pointlabel[x]=cl
elif(pointlabel[x]==edge):
pointlabel[x]=cl
            # Stop when every point in the queue has been checked
while not q.empty():
neighbors = pointcount[q.get()]
for y in neighbors:
if (pointlabel[y]==core):
pointlabel[y]=cl
q.put(y)
if (pointlabel[y]==edge):
pointlabel[y]=cl
cl=cl+1 #move to next cluster
return pointlabel,cl
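# Illustrative usage sketch: the Eps and MinPt values here are placeholder
# assumptions (not tuned for blobs.txt); in practice pick Eps near the elbow of
# the k-distance plot produced by find_nearest_neighbour().
def run_dbscan_example(eps=0.5, min_pt=4):
    read_dataset()
    find_nearest_neighbour(min_pt)
    labels, cl = dbscan(dataset, eps, min_pt)
    # clusters are labelled 1..cl-1; label 0 (UNASSIGNED) marks noise points
    print("clusters found:", cl - 1, "noise points:", labels.count(UNASSIGNED))
    return labels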
def calc_distance(X1, X2):
return(sum((X1 - X2)**2))**0.5
def findClosestCentroids(ic, X):
assigned_centroid = []
for i in X:
distance=[]
for j in ic:
distance.append(calc_distance(i, j))
assigned_centroid.append(np.argmin(distance))
return assigned_centroid
def calc_centroids(clusters, X):
new_centroids = []
new_df = pd.concat([pd.DataFrame(X), | pd.DataFrame(clusters, columns=['cluster']) | pandas.DataFrame |
import pandas as pd
test_data_set = pd.read_csv('test.csv')
train_data_set = pd.read_csv('train.csv')
gen_sub_set = pd.read_csv('gender_submission.csv')
test_set = gen_sub_set.merge(test_data_set,how='left')
Data_Set = | pd.concat([train_data_set,test_set],axis=0) | pandas.concat |
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import arctern
def test_suite():
from multiprocessing import Process
import time
p1 = Process(target=ST_Intersection)
p2 = Process(target=ST_Equals)
p3 = Process(target=ST_Touches)
p4 = Process(target=ST_Overlaps)
p5 = Process(target=ST_Crosses)
p6 = Process(target=ST_Point)
p7 = Process(target=ST_Contains)
p8 = Process(target=ST_Intersects)
p9 = Process(target=ST_Distance)
p10 = Process(target=ST_DistanceSphere)
p11 = Process(target=ST_HausdorffDistance)
p12 = Process(target=ST_PolygonFromEnvelope)
start = time.time()
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
p9.start()
p10.start()
p11.start()
p12.start()
p1.join()
p2.join()
p3.join()
p4.join()
p5.join()
p6.join()
p7.join()
p8.join()
p9.join()
p10.join()
p11.join()
p12.join()
end = time.time()
print('Task runs %0.2f seconds.' % ((end - start)))
def ST_Intersection():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Intersection(data1, data2)
assert len(rst) == 40000000
def ST_Equals():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Equals(data1, data2)
assert len(rst) == 40000000
def ST_Touches():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Touches(data1, data2)
assert len(rst) == 40000000
def ST_Overlaps():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Overlaps(data1, data2)
assert len(rst) == 40000000
def ST_Crosses():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Crosses(data1, data2)
assert len(rst) == 40000000
def ST_Point():
geo1 = 1.1
geo2 = 2.1
arr1 = [geo1 for x in range(1, 40000001)]
arr2 = [geo2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Point(data1, data2)
assert len(rst) == 40000000
def ST_Contains():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Contains(data1, data2)
assert len(rst) == 40000000
def ST_Intersects():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Intersects(data1, data2)
assert len(rst) == 40000000
def ST_Within():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Within(data1, data2)
assert len(rst) == 40000000
def ST_Distance():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = pandas.Series(arr1)
data2 = pandas.Series(arr2)
rst = arctern.ST_Distance(data1, data2)
assert len(rst) == 40000000
def ST_DistanceSphere():
geo1 = "POLYGON ((113.66220266388723 22.39277623851494, 114.58136061218778 22.39277623851494, 114.58136061218778 22.92800492531275 ,113.66220266388723 22.92800492531275, 113.66220266388723 22.39277623851494))"
geo2 = "POINT (1 1)"
geo_wkb1 = arctern.ST_GeomFromText(geo1)[0]
geo_wkb2 = arctern.ST_GeomFromText(geo2)[0]
arr1 = [geo_wkb1 for x in range(1, 40000001)]
arr2 = [geo_wkb2 for x in range(1, 40000001)]
data1 = | pandas.Series(arr1) | pandas.Series |
#!usr/bin/env python
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import pickle
from dataProcessor import processor
df_psy = pd.read_csv("Dataset/Youtube01-Psy.csv")
df_katyperry = pd.read_csv("Dataset/Youtube02-KatyPerry.csv")
df_lmfao = pd.read_csv("Dataset/Youtube03-LMFAO.csv")
df_eminem = pd.read_csv("Dataset/Youtube04-Eminem.csv")
df_shakira = pd.read_csv("Dataset/Youtube05-Shakira.csv")
df = | pd.concat([df_psy, df_katyperry, df_lmfao, df_eminem, df_shakira]) | pandas.concat |
#coding=utf-8
# Keyboard-pattern analysis
# (1) Read the passwords from the csdn and yahoo database dumps
# (2) Define 14 common keyboard-walk password strings
# (3) Substring-match each password against the defined strings (ignoring single letters and digits)
# (4) Keep only the relatively high-frequency passwords and write a CSV of the most frequent passwords and their frequencies
import pandas as pd
import numpy as np
import csv
np.set_printoptions(suppress=True)
##############################################
# (1) Read the data
##############################################
yahoo_data = pd.read_csv('Yahoo-original-mail-passwd.csv',engine='python',sep='\t', quoting=csv.QUOTE_NONE,names=["email","passwd"], quotechar='"', error_bad_lines=False)
csdn_data = pd.read_csv('csdn-original-username-mail-passwd.csv',engine='python',sep='\t', quoting=csv.QUOTE_NONE,names=["name","email","passwd"],quotechar='"', error_bad_lines=False)
# Read the password columns
yahoo_passwd = pd.Series(yahoo_data['passwd'].values)
csdn_passwd = pd.Series(csdn_data['passwd'].values)
##############################################
# (2) Define common keyboard-walk password strings
##############################################
keyboard_pass1 = '<PASSWORD>,./'
keyboard_pass2 = '<PASSWORD>'
keyboard_pass3 = '<PASSWORD>;/'
keyboard_pass4 = '<PASSWORD>'
keyboard_pass5 = '<PASSWORD>'
keyboard_pass6 = '<PASSWORD>4rfvbgt56yhnmju78ik,.lo90p;/'
keyboard_pass7 = '0987654321poiuytrewq;lkjhgfdsa/.,mnbvcxz'
keyboard_pass8 = '<KEY>'
#忽略数字行
keyboard_pass9 = 'qazwsxedcrfvtgbyhnujmik,ol.p;/'
keyboard_pass10 = '<KEY>'
keyboard_pass11 = 'zaqxswcdevfrbgtnhymju,ki.lo/;p'
keyboard_pass12 = 'zaqwsxcderfvbgtyhn<PASSWORD>,.lop;/'
keyboard_pass13 = '<PASSWORD>'
keyboard_pass14 = '<PASSWORD>'
keyboard_pass_all = keyboard_pass1 + keyboard_pass2 + keyboard_pass3 + keyboard_pass4 + keyboard_pass5 + keyboard_pass6 + keyboard_pass7 + keyboard_pass8 + keyboard_pass9 + keyboard_pass10 + keyboard_pass11+keyboard_pass12+keyboard_pass13+keyboard_pass14
##############################################
# (3) Substring-match passwords in both datasets
##############################################
# Dictionaries to store each matched password and its occurrence count
yahoo_output = dict()
csdn_output = dict()
####################### YAHOO dataset
y_sum = 0
for data in yahoo_passwd.values:
    data = str(data)  # normalize everything to str
    # The password is a substring of the defined keyboard strings and is not a single letter or digit
if data in keyboard_pass_all and len(data) > 1:
y_sum = y_sum + 1
if yahoo_output.has_key(data):
            # Password already seen: increment its count
yahoo_output[data] = yahoo_output[data]+ 1
        else:  # first occurrence of this password
yahoo_output[data] = 1
####################### CSDN dataset
c_sum = 0
for data in csdn_passwd.values:
data = str(data)
if data in keyboard_pass_all and len(data) > 1:
c_sum = c_sum + 1
if csdn_output.has_key(data):
csdn_output[data] = csdn_output[data] + 1
else:
csdn_output[data] = 1
###############################################################
# (4) Compute frequencies, keep the relatively high-frequency passwords, and write the ranked result CSVs
###############################################################
####################### YAHOO dataset
# Drop low-frequency passwords that appear fewer than 10 times
result = dict()
for data in yahoo_output:
if yahoo_output[data] >= 10:
result[data] = yahoo_output[data]
yahoo_output = result
yahoo_output = pd.Series(yahoo_output)
# Sort in descending order
yahoo_output = yahoo_output.sort_values(ascending = False)
yahoo = | pd.DataFrame({'password' : yahoo_output.index , 'numbers' : yahoo_output.values , 'probability' : None}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module contains all the remote tests. The data for these
tests is requested to ESA NEOCC portal.
* Project: NEOCC portal Python interface
* Property: European Space Agency (ESA)
* Developed by: Elecnor Deimos
* Author: <NAME>
* Date: 02-11-2021
© Copyright [European Space Agency][2021]
All rights reserved
"""
import io
import os
import re
import random
import pytest
import pandas as pd
import pandas.testing as pdtesting
import pandas.api.types as ptypes
import requests
from astroquery.esa.neocc.__init__ import conf
from astroquery.esa.neocc import neocc, lists, tabs
import astropy
# Import BASE URL and TIMEOUT
API_URL = conf.API_URL
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
TIMEOUT = conf.TIMEOUT
VERIFICATION = conf.SSL_CERT_VERIFICATION
@pytest.mark.remote_data
class TestLists:
"""Class which contains the unitary tests for lists module.
"""
# Dictionary for lists
lists_dict = {
"nea_list": 'allneo.lst',
"updated_nea": 'updated_nea.lst',
"monthly_update": 'monthly_update.done',
"risk_list": 'esa_risk_list',
"risk_list_special": 'esa_special_risk_list',
"close_approaches_upcoming": 'esa_upcoming_close_app',
"close_approaches_recent": 'esa_recent_close_app',
"priority_list": 'esa_priority_neo_list',
"priority_list_faint": 'esa_faint_neo_list',
"close_encounter" : 'close_encounter2.txt',
"impacted_objects" : 'impactedObjectsList.txt',
"neo_catalogue_current" : 'neo_kc.cat',
"neo_catalogue_middle" : 'neo_km.cat'
}
def test_get_list_url(self):
"""Test for checking the URL termination for requested lists.
        Check that an invalid list name raises KeyError.
"""
# Valid inputs
valid_names = ["nea_list", "updated_nea", "monthly_update",
"risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent",
"priority_list", "priority_list_faint",
"close_encounter", "impacted_objects"]
# Invalid inputs
bad_names = ["ASedfe", "%&$", "ÁftR+", 154]
# Assert for valid names
for element in valid_names:
assert lists.get_list_url(element) == \
self.lists_dict[element]
# Assert for invalid names
for elements in bad_names:
with pytest.raises(KeyError):
lists.get_list_url(elements)
def test_get_list_data(self):
"""Check data obtained is pandas.DataFrame or pandas.Series
"""
# Check pd.Series output
list_series = ["nea_list", "updated_nea", "monthly_update"]
for series in list_series:
assert isinstance(lists.get_list_data(self.\
lists_dict[series], series), pd.Series)
# Check pd.DataFrame output
list_dfs = ["risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent", "priority_list",
"close_encounter", "priority_list_faint",
"impacted_objects"]
for dfs in list_dfs:
assert isinstance(lists.get_list_data(self.\
lists_dict[dfs], dfs), pd.DataFrame)
def test_parse_list(self):
"""Check data obtained is pandas.DataFrame or pandas.Series
"""
# Check pd.Series output
url_series = ["nea_list", "updated_nea", "monthly_update"]
for url in url_series:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
assert isinstance(lists.parse_list(url, data_list_d),
pd.Series)
# Check pd.DataFrame output
url_dfs = ["risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent", "priority_list",
"close_encounter", "priority_list_faint",
"impacted_objects"]
for url in url_dfs:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
assert isinstance(lists.parse_list(url, data_list_d),
pd.DataFrame)
# Invalid inputs
bad_names = ["ASedfe", "%&$", "ÁftR+", 154]
# Assert for invalid names
for elements in bad_names:
with pytest.raises(KeyError):
lists.parse_list(elements, data_list_d)
def test_parse_nea(self):
"""Check data: nea list, updated nea list and monthly update
"""
url_series = ["nea_list", "updated_nea", "monthly_update"]
for url in url_series:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_nea(data_list_d)
# Assert is a pandas Series
assert isinstance(new_list, pd.Series)
# Assert is not empty
assert not new_list.empty
# List of all NEAs
if url == "nea_list":
filename = os.path.join(DATA_DIR, self.lists_dict[url])
content = open(filename, 'r')
nea_list = pd.read_csv(content, header=None)
# Remove whitespaces
nea_list = nea_list[0].str.strip().replace(r'\s+', ' ',
regex=True)\
.str.replace('# ', '')
# Check size of the data frame
assert len(new_list.index) > 20000
                # Check that the first 74 elements match the reference data
                # (later entries carry provisional designations, which may change)
pdtesting.assert_series_equal(new_list[0:74], nea_list[0:74])
else:
# Check date format DDD MMM DD HH:MM:SS UTC YYYY
assert re.match(r'\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} '
r'\w{3} \d{4}', new_list.iloc[0])
def test_parse_risk(self):
"""Check data: risk_list, risk_list_special
"""
url_risks = ['risk_list', 'risk_list_special']
# Columns of risk lists
risk_columns = ['Object Name', 'Diameter in m', '*=Y',
'Date/Time', 'IP max', 'PS max', 'TS',
'Vel in km/s', 'First year', 'Last year',
'IP cum', 'PS cum']
risk_special_columns = risk_columns[0:8]
for url in url_risks:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
            # Parse using parse_risk
new_list = lists.parse_risk(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
if url == 'risk_list':
# Assert dataframe is not empty, columns names, length
assert not new_list.empty
assert (new_list.columns == risk_columns).all()
assert len(new_list.index) > 1000
# Assert columns data types
# Floats
float_cols = ['Diameter in m', 'IP max', 'PS max',
'Vel in km/s', 'IP cum', 'PS cum']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# int64
int_cols = ['TS', 'First year', 'Last year']
assert all(ptypes.is_int64_dtype(new_list[cols2])\
for cols2 in int_cols)
# Object
object_cols = ['Object Name', '*=Y']
assert all(ptypes.is_object_dtype(new_list[cols3])\
for cols3 in object_cols)
# Datetime
assert ptypes.is_datetime64_ns_dtype(
new_list['Date/Time'])
else:
# Currently risk special list is empty
assert new_list.empty
assert (new_list.columns == risk_special_columns).all()
def test_parse_clo(self):
"""Check data: close_approaches_upcoming,
close_approaches_recent
"""
url_close = ['close_approaches_upcoming',
'close_approaches_recent']
# Columns of close approaches lists
close_columns = ['Object Name', 'Date',
'Miss Distance in km', 'Miss Distance in au',
'Miss Distance in LD', 'Diameter in m',
'*=Yes', 'H', 'Max Bright',
'Rel. vel in km/s']
for url in url_close:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
            # Parse using parse_clo
new_list = lists.parse_clo(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty, columns names and length
assert not new_list.empty
assert (new_list.columns == close_columns).all()
assert len(new_list.index) > 100
        # Assert ConnectionError: on an internal server error the request
        # returns an empty file
foo_error = io.StringIO('This site cant be reached\n'
'domain.com regused to connect\n'
'Search Google for domain\n'
'ERR_CONNECTION_REFUSED')
with pytest.raises(ConnectionError):
lists.parse_clo(foo_error)
# Assert columns data types
# Floats
float_cols = ['Miss Distance in au',
'Miss Distance in LD', 'Diameter in m', 'H',
'Max Bright', 'Rel. vel in km/s']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# int64
assert ptypes.is_int64_dtype(new_list['Miss Distance in km'])
# Object
object_cols = ['Object Name', '*=Yes']
assert all(ptypes.is_object_dtype(new_list[cols3])\
for cols3 in object_cols)
# Datetime
assert | ptypes.is_datetime64_ns_dtype(new_list['Date']) | pandas.api.types.is_datetime64_ns_dtype |
from unittest import TestCase, main
import os
import pandas as pd
import numpy as np
import numpy.testing as npt
from io import StringIO
from metapool.metapool import (read_plate_map_csv, read_pico_csv,
calculate_norm_vol,
format_dna_norm_picklist, assign_index, format_index_picklist,
compute_qpcr_concentration, compute_shotgun_pooling_values_eqvol,
compute_shotgun_pooling_values_qpcr,
compute_shotgun_pooling_values_qpcr_minvol, estimate_pool_conc_vol,
format_pooling_echo_pick_list, plot_plate_vals, make_2D_array,
combine_dfs, parse_dna_conc_csv, add_dna_conc,
compute_pico_concentration, ss_temp, format_sheet_comments,
format_sample_sheet, bcl_scrub_name, rc, sequencer_i5_index,
format_sample_data, reformat_interleaved_to_columns)
class Tests(TestCase):
def setUp(self):
self.maxDiff = None
self.cp_vals = np.array([[10.14, 7.89, 7.9, 15.48],
[7.86, 8.07, 8.16, 9.64],
[12.29, 7.64, 7.32, 13.74]])
self.dna_vals = np.array([[10.14, 7.89, 7.9, 15.48],
[7.86, 8.07, 8.16, 9.64],
[12.29, 7.64, 7.32, 13.74]])
self.qpcr_conc = \
np.array([[98.14626462, 487.8121413, 484.3480866, 2.183406934],
[498.3536649, 429.0839787, 402.4270321, 140.1601735],
[21.20533391, 582.9456031, 732.2655041, 7.545145988]])
self.pico_conc = \
np.array([[38.4090909, 29.8863636, 29.9242424, 58.6363636],
[29.7727273, 30.5681818, 30.9090909, 36.5151515],
[46.5530303, 28.9393939, 27.7272727, 52.0454545]])
# def test_compute_shotgun_normalization_values(self):
# input_vol = 3.5
# input_dna = 10
# plate_layout = []
# for i in range(4):
# row = []
# for j in range(4):
# row.append({'dna_concentration': 10,
# 'sample_id': "S%s.%s" % (i, j)})
# plate_layout.append(row)
# obs_sample, obs_water = compute_shotgun_normalization_values(
# plate_layout, input_vol, input_dna)
# exp_sample = np.zeros((4, 4), dtype=np.float)
# exp_water = np.zeros((4, 4), dtype=np.float)
# exp_sample.fill(1000)
# exp_water.fill(2500)
# npt.assert_almost_equal(obs_sample, exp_sample)
# npt.assert_almost_equal(obs_water, exp_water)
# # Make sure that we don't go above the limit
# plate_layout[1][1]['dna_concentration'] = 0.25
# obs_sample, obs_water = compute_shotgun_normalization_values(
# plate_layout, input_vol, input_dna)
# exp_sample[1][1] = 3500
# exp_water[1][1] = 0
# npt.assert_almost_equal(obs_sample, exp_sample)
# npt.assert_almost_equal(obs_water, exp_water)
def test_read_plate_map_csv(self):
plate_map_csv = \
'Sample\tRow\tCol\tBlank\n' + \
'sam1\tA\t1\tFalse\n' + \
'sam2\tA\t2\tFalse\n' + \
'blank1\tB\t1\tTrue\n' + \
'sam3\tB\t2\tFalse\n'
plate_map_f = StringIO(plate_map_csv)
exp_plate_df = pd.DataFrame({'Sample': ['sam1','sam2','blank1','sam3'],
'Row': ['A','A','B','B'],
'Col': [1,2,1,2],
'Well': ['A1','A2','B1','B2'],
'Blank': [False, False, True, False]})
obs_plate_df = read_plate_map_csv(plate_map_f)
pd.testing.assert_frame_equal(obs_plate_df, exp_plate_df, check_like=True)
def test_read_pico_csv(self):
# Test a normal sheet
pico_csv = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t4039.000\t2.644
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
exp_pico_df = pd.DataFrame({'Well': ['A1','A2','B1','B2'],
'Sample DNA Concentration':
[3.432, 3.239, 10.016, 2.644]})
pico_csv_f = StringIO(pico_csv)
obs_pico_df = read_pico_csv(pico_csv_f)
pd.testing.assert_frame_equal(obs_pico_df, exp_pico_df, check_like=True)
# Test a sheet that has some ???? zero values
pico_csv = '''Results
Well ID\tWell\t[Blanked-RFU]\t[Concentration]
SPL1\tA1\t5243.000\t3.432
SPL2\tA2\t4949.000\t3.239
SPL3\tB1\t15302.000\t10.016
SPL4\tB2\t\t?????
Curve2 Fitting Results
Curve Name\tCurve Formula\tA\tB\tR2\tFit F Prob
Curve2\tY=A*X+B\t1.53E+003\t0\t0.995\t?????
'''
exp_pico_df = pd.DataFrame({'Well': ['A1','A2','B1','B2'],
'Sample DNA Concentration':
[3.432, 3.239, 10.016, np.nan]})
pico_csv_f = StringIO(pico_csv)
obs_pico_df = read_pico_csv(pico_csv_f)
pd.testing.assert_frame_equal(obs_pico_df, exp_pico_df, check_like=True)
def test_calculate_norm_vol(self):
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
exp_vols = np.array([[2500., 632.5],
[3500., 3500.]])
obs_vols = calculate_norm_vol(dna_concs)
np.testing.assert_allclose(exp_vols, obs_vols)
def test_format_dna_norm_picklist(self):
exp_picklist = \
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t' + \
'Concentration\tTransfer Volume\tDestination Plate Name\tDestination Well\n' + \
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\tA1\n' + \
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\tA2\n' + \
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\tB1\n' + \
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\tB2\n' + \
'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\tA1\n' + \
'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\tA2\n' + \
'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\tB1\n' + \
'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\tB2'
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
sample_names = sample_names,
dna_concs = dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
# test if switching dest wells
exp_picklist = \
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t' + \
'Concentration\tTransfer Volume\tDestination Plate Name\tDestination Well\n' + \
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\tD1\n' + \
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\tD2\n' + \
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\tE1\n' + \
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\tE2\n' + \
'sam1\tSample\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\tD1\n' + \
'sam2\tSample\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\tD2\n' + \
'blank1\tSample\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\tE1\n' + \
'sam3\tSample\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\tE2'
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
dest_wells = np.array([['D1', 'D2'],
['E1', 'E2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
dest_wells = dest_wells,
sample_names = sample_names,
dna_concs = dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
# test if switching source plates
exp_picklist = \
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t' + \
'Concentration\tTransfer Volume\tDestination Plate Name\tDestination Well\n' + \
'sam1\tWater\t384PP_AQ_BP2_HT\tA1\t2.0\t1000.0\tNormalizedDNA\tA1\n' + \
'sam2\tWater\t384PP_AQ_BP2_HT\tA2\t7.89\t2867.5\tNormalizedDNA\tA2\n' + \
'blank1\tWater\t384PP_AQ_BP2_HT\tB1\tnan\t0.0\tNormalizedDNA\tB1\n' + \
'sam3\tWater\t384PP_AQ_BP2_HT\tB2\t0.0\t0.0\tNormalizedDNA\tB2\n' + \
'sam1\tSample_Plate1\t384PP_AQ_BP2_HT\tA1\t2.0\t2500.0\tNormalizedDNA\tA1\n' + \
'sam2\tSample_Plate1\t384PP_AQ_BP2_HT\tA2\t7.89\t632.5\tNormalizedDNA\tA2\n' + \
'blank1\tSample_Plate2\t384PP_AQ_BP2_HT\tB1\tnan\t3500.0\tNormalizedDNA\tB1\n' + \
'sam3\tSample_Plate2\t384PP_AQ_BP2_HT\tB2\t0.0\t3500.0\tNormalizedDNA\tB2'
dna_vols = np.array([[2500., 632.5],
[3500., 3500.]])
water_vols = 3500 - dna_vols
wells = np.array([['A1', 'A2'],
['B1', 'B2']])
sample_names = np.array([['sam1', 'sam2'],
['blank1', 'sam3']])
sample_plates = np.array([['Sample_Plate1', 'Sample_Plate1'],
['Sample_Plate2', 'Sample_Plate2']])
dna_concs = np.array([[2, 7.89],
[np.nan, .0]])
obs_picklist = format_dna_norm_picklist(dna_vols, water_vols, wells,
sample_names = sample_names,
sample_plates = sample_plates,
dna_concs = dna_concs)
self.assertEqual(exp_picklist, obs_picklist)
def test_format_index_picklist(self):
exp_picklist = \
'Sample\tSource Plate Name\tSource Plate Type\tSource Well\tTransfer Volume\tIndex Name\t' + \
'Index Sequence\tIndex Combo\tDestination Plate Name\tDestination Well\n' + \
'sam1\tiTru5_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru5_01_A\tACCGACAA\t0\tIndexPCRPlate\tA1\n' + \
'sam2\tiTru5_plate\t384LDV_AQ_B2_HT\tB1\t250\tiTru5_01_B\tAGTGGCAA\t1\tIndexPCRPlate\tA2\n' + \
'blank1\tiTru5_plate\t384LDV_AQ_B2_HT\tC1\t250\tiTru5_01_C\tCACAGACT\t2\tIndexPCRPlate\tB1\n' + \
'sam3\tiTru5_plate\t384LDV_AQ_B2_HT\tD1\t250\tiTru5_01_D\tCGACACTT\t3\tIndexPCRPlate\tB2\n' + \
'sam1\tiTru7_plate\t384LDV_AQ_B2_HT\tA1\t250\tiTru7_101_01\tACGTTACC\t0\tIndexPCRPlate\tA1\n' + \
'sam2\tiTru7_plate\t384LDV_AQ_B2_HT\tA2\t250\tiTru7_101_02\tCTGTGTTG\t1\tIndexPCRPlate\tA2\n' + \
'blank1\tiTru7_plate\t384LDV_AQ_B2_HT\tA3\t250\tiTru7_101_03\tTGAGGTGT\t2\tIndexPCRPlate\tB1\n' + \
'sam3\tiTru7_plate\t384LDV_AQ_B2_HT\tA4\t250\tiTru7_101_04\tGATCCATG\t3\tIndexPCRPlate\tB2'
sample_wells = np.array(['A1', 'A2', 'B1', 'B2'])
sample_names = np.array(['sam1', 'sam2', 'blank1', 'sam3'])
indices = pd.DataFrame({'i5 name': {0: 'iTru5_01_A',
1: 'iTru5_01_B',
2: 'iTru5_01_C',
3: 'iTru5_01_D'},
'i5 plate': {0: 'iTru5_plate',
1: 'iTru5_plate',
2: 'iTru5_plate',
3: 'iTru5_plate'},
'i5 sequence': {0: 'ACCGACAA', 1: 'AGTGGCAA',
2: 'CACAGACT', 3: 'CGACACTT'},
'i5 well': {0: 'A1', 1: 'B1', 2: 'C1', 3: 'D1'},
'i7 name': {0: 'iTru7_101_01',
1: 'iTru7_101_02',
2: 'iTru7_101_03',
3: 'iTru7_101_04'},
'i7 plate': {0: 'iTru7_plate',
1: 'iTru7_plate',
2: 'iTru7_plate',
3: 'iTru7_plate'},
'i7 sequence': {0: 'ACGTTACC', 1: 'CTGTGTTG',
2: 'TGAGGTGT', 3: 'GATCCATG'},
'i7 well': {0: 'A1', 1: 'A2', 2: 'A3', 3: 'A4'},
'index combo': {0: 0, 1: 1, 2: 2, 3: 3},
'index combo seq': {0: 'ACCGACAAACGTTACC',
1: 'AGTGGCAACTGTGTTG',
2: 'CACAGACTTGAGGTGT',
3: 'CGACACTTGATCCATG'}})
obs_picklist = format_index_picklist(sample_names, sample_wells, indices)
self.assertEqual(exp_picklist, obs_picklist)
def test_compute_qpcr_concentration(self):
obs = compute_qpcr_concentration(self.cp_vals)
exp = self.qpcr_conc
npt.assert_allclose(obs, exp)
def test_compute_shotgun_pooling_values_eqvol(self):
obs_sample_vols = \
compute_shotgun_pooling_values_eqvol(self.qpcr_conc,
total_vol=60.0)
exp_sample_vols = np.zeros([3, 4]) + 60.0/12*1000
npt.assert_allclose(obs_sample_vols, exp_sample_vols)
def test_compute_shotgun_pooling_values_eqvol_intvol(self):
obs_sample_vols = \
compute_shotgun_pooling_values_eqvol(self.qpcr_conc,
total_vol=60)
exp_sample_vols = np.zeros([3, 4]) + 60.0/12*1000
npt.assert_allclose(obs_sample_vols, exp_sample_vols)
def test_compute_shotgun_pooling_values_qpcr(self):
sample_concs = np.array([[1, 12, 400],
[200, 40, 1]])
exp_vols = np.array([[0, 50000, 6250],
[12500, 50000, 0]])
obs_vols = compute_shotgun_pooling_values_qpcr(sample_concs)
npt.assert_allclose(exp_vols, obs_vols)
def test_compute_shotgun_pooling_values_qpcr_minvol(self):
sample_concs = np.array([[1, 12, 400],
[200, 40, 1]])
exp_vols = np.array([[100, 100, 4166.6666666666],
[8333.33333333333, 41666.666666666, 100]])
obs_vols = compute_shotgun_pooling_values_qpcr_minvol(sample_concs)
npt.assert_allclose(exp_vols, obs_vols)
def test_estimate_pool_conc_vol(self):
obs_sample_vols = compute_shotgun_pooling_values_eqvol(
self.qpcr_conc, total_vol=60.0)
obs_pool_conc, obs_pool_vol = estimate_pool_conc_vol(
obs_sample_vols, self.qpcr_conc)
exp_pool_conc = 323.873027979
exp_pool_vol = 60000.0
npt.assert_almost_equal(obs_pool_conc, exp_pool_conc)
npt.assert_almost_equal(obs_pool_vol, exp_pool_vol)
def test_format_pooling_echo_pick_list(self):
vol_sample = np.array([[10.00, 10.00, 5.00, 5.00, 10.00, 10.00]])
header = ['Source Plate Name,Source Plate Type,Source Well,'
'Concentration,Transfer Volume,Destination Plate Name,'
'Destination Well']
exp_values = ['1,384LDV_AQ_B2_HT,A1,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A2,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A3,,5.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A4,,5.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A5,,10.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A6,,10.00,NormalizedDNA,A2']
exp_str = '\n'.join(header + exp_values)
obs_str = format_pooling_echo_pick_list(vol_sample,
max_vol_per_well=26,
dest_plate_shape=[16,24])
self.maxDiff = None
self.assertEqual(exp_str, obs_str)
    def test_format_pooling_echo_pick_list_nan(self):
vol_sample = np.array([[10.00, 10.00, np.nan, 5.00, 10.00, 10.00]])
header = ['Source Plate Name,Source Plate Type,Source Well,'
'Concentration,Transfer Volume,Destination Plate Name,'
'Destination Well']
exp_values = ['1,384LDV_AQ_B2_HT,A1,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A2,,10.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A3,,0.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A4,,5.00,NormalizedDNA,A1',
'1,384LDV_AQ_B2_HT,A5,,10.00,NormalizedDNA,A2',
'1,384LDV_AQ_B2_HT,A6,,10.00,NormalizedDNA,A2']
exp_str = '\n'.join(header + exp_values)
obs_str = format_pooling_echo_pick_list(vol_sample,
max_vol_per_well=26,
dest_plate_shape=[16,24])
self.maxDiff = None
self.assertEqual(exp_str, obs_str)
def test_make_2D_array(self):
example_qpcr_df = pd.DataFrame({'Cp': [12, 0, 5, np.nan],
'Pos': ['A1','A2','A3','A4']})
exp_cp_array = np.array([[12.0,0.0,5.0,np.nan]])
np.testing.assert_allclose(make_2D_array(example_qpcr_df, rows=1, cols=4).astype(float), exp_cp_array)
example2_qpcr_df = pd.DataFrame({'Cp': [12, 0, 1, np.nan,
12, 0, 5, np.nan],
'Pos': ['A1','A2','A3','A4',
'B1','B2','B3','B4']})
exp2_cp_array = np.array([[12.0,0.0,1.0,np.nan],
[12.0,0.0,5.0,np.nan]])
np.testing.assert_allclose(make_2D_array(example2_qpcr_df, rows=2, cols=4).astype(float), exp2_cp_array)
def combine_dfs(self):
exp_df_f = '''Sample\tWell\tPlate\tCounter\tPrimer_i5\tSource_Well_i5\tIndex_i5\tPrimer_i7\tSource_Well_i7\tIndex_i7\tDNA_concentration\tTransfer_Volume\tCp
8_29_13_rk_rh\tA1\tABTX_35\t1841.0\tiTru5_01_G\tG1\tGTTCCATG\tiTru7_110_05\tA23\tCGCTTAAC\t12.751753\t80.0\t20.55
8_29_13_rk_lh\tC1\tABTX_35\t1842.0\tiTru5_01_H\tH1\tTAGCTGAG\tiTru7_110_06\tB23\tCACCACTA\t17.582063\t57.5\t9.15'''
test_index_picklist_f = '''\tWell Number\tPlate\tSample Name\tSource Plate Name\tSource Plate Type\tCounter\tPrimer\tSource Well\tIndex\tUnnamed: 9\tUnnamed: 10\tUnnamed: 11\tTransfer volume\tDestination Well\tUnnamed: 14
0\t1\tABTX_35\t8_29_13_rk_rh\ti5 Source Plate\t384LDV_AQ_B2_HT\t1841.0\tiTru5_01_G\tG1\tGTTCCATG\tiTru7_110_05\tA23\tCGCTTAAC\t250\tA1\tNaN
1\t2\tABTX_35\t8_29_13_rk_lh\ti5 Source Plate\t384LDV_AQ_B2_HT\t1842.0\tiTru5_01_H\tH1\tTAGCTGAG\tiTru7_110_06\tB23\tCACCACTA\t250\tC1\tNaN
2\t1\tABTX_35\t8_29_13_rk_rh\ti7 Source Plate\t384LDV_AQ_B2_HT\t1841.0\tiTru7_110_05\tA23\tCGCTTAAC\t\t\t\t250\tA1\tNaN
3\t2\tABTX_35\t8_29_13_rk_lh\ti7 Source Plate\t384LDV_AQ_B2_HT\t1842.0\tiTru7_110_06\tB23\tCACCACTA\t\t\t\t250\tC1\tNaN'''
test_dna_picklist_f = '''\tSource Plate Name\tSource Plate Type\tSource Well\tConcentration\tTransfer Volume\tDestination Plate Name\tDestination Well
0\twater\t384LDV_AQ_B2_HT\tA1\tNaN\t3420.0\tNormalizedDNA\tA1
1\twater\t384LDV_AQ_B2_HT\tC1\tNaN\t3442.5\tNormalizedDNA\tC1
5\t1\t384LDV_AQ_B2_HT\tA1\t12.751753\t80.0\tNormalizedDNA\tA1
6\t1\t384LDV_AQ_B2_HT\tC1\t17.582063\t57.5\tNormalizedDNA\tC1'''
test_qpcr_f = '''\tInclude\tColor\tPos\tName\tCp\tConcentration\tStandard\tStatus
0\tTRUE\t255\tA1\tSample 1\t20.55\tNaN\t0\tNaN
1\tTRUE\t255\tC1\tSample 2\t9.15\tNaN\t0\tNaN'''
exp_out_f = '''Well\tCp\tDNA Concentration\tDNA Transfer Volume\tSample Name\tPlate\tCounter\tPrimer i7\tSource Well i7\tIndex i7\tPrimer i5\tSource Well i5\tIndex i5
A1\t20.55\t12.751753\t80.0\t8_29_13_rk_rh\tABTX_35\t1841.0\tiTru7_110_05\tA23\tCGCTTAAC\tiTru5_01_G\tG1\tGTTCCATG
C1\t9.15\t17.582063\t57.5\t8_29_13_rk_lh\tABTX_35\t1842.0\tiTru7_110_06\tB23\tCACCACTA\tiTru5_01_H\tH1\tTAGCTGAG'''
test_index_picklist_df = pd.read_csv(StringIO(test_index_picklist_f), header=0, sep='\t')
test_dna_picklist_df = pd.read_csv(StringIO(test_dna_picklist_f), header=0, sep='\t')
test_qpcr_df = pd.read_csv(StringIO(test_qpcr_f), header=0, sep='\t')
exp_df = pd.read_csv(StringIO(exp_out_f), header=0, sep='\t')
combined_df = combine_dfs(test_qpcr_df, test_dna_picklist_df, test_index_picklist_df)
| pd.testing.assert_frame_equal(combined_df, exp_df, check_like=True) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
| tm.assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
# @Author: <NAME>
# @Date: Mon, May 4th 2020, 8:36 pm
# @Email: <EMAIL>
# @Filename: migrate_db.py
'''
Functions for duplicating, archiving, and converting database assets, including raw source files as well as SQLite db files.
'''
from tqdm import tqdm
from os.path import isfile
import shutil
import pandas as pd
import time
from pyleaves.utils.img_utils import DaskCoder, CorruptJPEGError
def duplicate_raw_dataset(data: pd.DataFrame, omitted_rows: list = None):
"""
Uses shutil.copy2 to duplicate a sequence of files at a new location to be as close to the original files as possible.
# TODO Check if file hash remains the same, if so, then can be used as a test for successful duplication.
Parameters
----------
data : pd.DataFrame
Contains all necessary info to duplicate the files. For each file to be duplicated, must have the corresponding file paths in the
source_path and target_path columns, respectively. The choice of how to determine the best target path must be made prior to this function.
omitted_rows : list
Optional list of omitted sample rows. Unsuccessful copy attempts will be logged here and returned from function.
Returns
-------
pd.DataFrame
duplicated_data: Same format as input data, only contains successful samples.
list
omitted_rows: list of dataframes, containing unsuccessful rows.
"""
    if omitted_rows is None: #Avoid the mutable default argument pitfall
        omitted_rows = []
    data = data.copy()
file_not_found = []
copy_errors = []
for i, row in tqdm(data.iterrows()):
try:
if isfile(row.target_path):
continue
shutil.copy2(row.source_path, row.target_path)
assert isfile(row.target_path)
except FileNotFoundError as e:
print(str(e))
file_not_found.append(row)
print(f'total {len(file_not_found)+len(copy_errors)} files not found so far')
except AssertionError as e:
print(str(e))
copy_errors.append(row)
print(f'total {len(file_not_found)+len(copy_errors)} files not found so far')
if len(file_not_found):
file_not_found_df = | pd.concat(file_not_found,axis=1) | pandas.concat |
import numpy as np
import pandas as pd
from io import StringIO
import re
import csv
from csv import reader, writer
import sys
import os
import glob
import fnmatch
from os import path
import matplotlib
from matplotlib import pyplot as plt
print("You are using Zorbit Analyzer v0.1")
directory_path = input("Please enter the path to the directory of your files. All files should be in the same location: ") #Asks users for path
os.chdir(directory_path)
x = input('Input your Interproscan output gff3 file(s):') #Asks users for gff3 input
if "*" in x: #Handles the case of *.gff3
gff3_input = glob.glob("*.gff3")
else:
    y = re.sub('[|; ]', ', ', x) #Substitutes possible gff3 file delimiters with commas
gff3_input = re.split(', ', y) #Splits gff3 input into a list
for i in gff3_input:
if os.path.exists(i): #Checks existence of gff3 file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
fasta_input = input('Input your fasta file:') #Asks users for fasta input file
if os.path.exists(fasta_input): #Checks existence of fasta input file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
if fnmatch.fnmatch(fasta_input, '*fastq*'):
print("Zorbit Analyzer is not specifically constructed to handle fastq files but will try. If errors convert to fasta format")
ortho_input = input('Input your ProteinOrtho output file:') #Asks users for ProteinOrtho input
if os.path.exists(ortho_input): #Checks existence of ProteinOrtho input
pass
else:
print("There does not seem to be a file by that name. Please check your path/filename and try again")
sys.exit()
ortho_input_file_name = input('Input your ProteinOrtho input file name (faa). Leave blank if unknown, though the script will run slower:') #Asks users for the ProteinOrtho input (faa) file name
while True:
file_to_write = input('Input your desired ZorbitAnalyzer output file name: ') #Asks users for output file
if file_to_write != '': #Checks to see if user entered a file name
break
else:
print("You did not enter an output file name") #Repeatedly asks for output file name if not given
continue
Choice = ['yes', 'y', 'no', 'n']
flag = True
while flag is True:
exclusion_flag = input("Would you like to exclude sequences that do not have either Interproscan or ProteinOrtho hits? (Yes/No) ").lower()
for i in Choice:
if exclusion_flag.startswith(i):
flag = False
break
else:
continue
if exclusion_flag.startswith('y'):
exclusion_flag = 1
else:
exclusion_flag = 0
print("Analyzing files") #Lets user know input portion has completed
pdortho = pd.read_csv(ortho_input, sep="\t", engine="python") #Creates ProteinOrtho pd
test_file = 'test.txt'
test2_file = 'test2.txt'
test3_file = 'test3.txt'
#Testing open/closing files
def try_file(input_file): #Defining function that creates/opens user output file and truncates it before closing it
try:
open(input_file, 'w+').close()
except IOError:
print("Unable to open output file")
try_file(file_to_write) #Creates/opens the user-specified output file and truncates it before closing it
try_file('test.txt') #Creates/opens test file and truncates it before closing it
try_file('gff3_file_to_write.txt') #Creates/opens gff3 output file and truncates it before closing it
try_file('gff3_statsfile_to_write.txt') #Creates/opens gff3 stats file and truncates it before closing it
try_file('fasta_file_to_write.txt') #Creates/opens fasta output file and truncates it before closing it
try_file('ortho_file_to_write.txt') #Creates/opens ProteinOrtho output file and truncates it before closing it
try_file('ortho_file_to_write2.txt') #Creates/opens a second ProteinOrtho output file and truncates it before closing it
try_file('zorbit_statistics.txt') #Creates/opens a statistics file and truncates it before closing it
#Defining variables for later use
fasta_file_to_write = 'fasta_file_to_write.txt' #Defining the interim fasta file to write
gff3_file_to_write = 'gff3_file_to_write.txt' #Defining the interim gff3 file to write
gff3_statsfile_to_write = 'gff3_statsfile_to_write.txt'
ortho_file_to_write = 'ortho_file_to_write.txt' #Defining the interim Protein Ortho file to write
zorbit_statistics = 'zorbit_statistics.txt' #Defining the Zorbit Statistics variable
string_to_remove1 = '##' #Removes header and gene introduction lines
string_to_remove2 = 'polypeptide' #Removes redundant polypeptide line
string_to_remove3 = 'MobiDBLite' #Removes results from MobiDBLite database
string_to_end = '##FASTA' #Sets end of file as the start of the fasta/code part of gff3 files
#fasta
fasta_file = None
fastq_file = None
fasta_type = "amino_acid"
fastq_start_character = '@'
fasta_start_character = '>' #Setting start character for fasta information line
fastq_third_line_character ='+'
fna_type = "fna"
if fna_type in fasta_input:
fasta_type = "nucleotide"
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_file = fasta_input
break
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fastq_file = fasta_input
fasta_type = "nucleotide"
break
else:
print("The fasta input file does not seem to have typical fasta or fastq format")
sys.exit()
if fasta_file is not None: #Checking to see if fasta input was fasta file (should not be empty)
print("Working on fasta file")
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a') as f: #Opens the output file to append
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_nostart = re.sub('>', '\n', line) #Removing > symbol and replacing with carriage return from each occurrence
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
else:
if not line.isspace(): #Will not write blank lines
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
continue
elif fastq_file is not None: #Checking to see if fasta input was fastq file (should not be empty)
print("Working on fastq file")
with open(fasta_input, 'r', encoding="latin-1") as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a', encoding="latin-1") as f: #Opens the output file to append
for i, line in enumerate(fasta): #reading lines in fasta file
if i == 0: # Dealing with first line differently (no line break)
fasta_nostart = re.sub('@', '', line) #Removing @ symbol from each occurrence and replaces with nothing
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fasta_nostart = re.sub('@', '\n', line) #Removing @ symbol from each occurrence and replaces with carriage return
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replaces with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replaces with comma
f.write(fasta_csv) #Writes output to file
elif i % 4 == 1: #Writing line 2/4 (sequence file) to output file
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
pass
else:
print("The input file does not seem to be in typical fasta or fastq format. Please check and try again") #Ending if atypical fasta/fastq format
sys.exit()
for i in gff3_input: #Cleaning up gff3 file prior to conversion to dataframe
with open(i, 'r') as stack:
with open(gff3_file_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
elif string_to_remove2 in line: #Removing polypeptide line (if present)
continue
elif string_to_remove3 in line: #Removing MobiDBLite database (if present)
continue
else:
f.write(line)
for i in gff3_input: #Saving unedited gff3 input into file for statistics purposes later
with open(i, 'r') as stack:
with open(gff3_statsfile_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header and gene introduction lines (if present)
continue
else:
f.write(line)
fasta_column_names = ['SeqID', 'Information', 'Sequence'] #Defining the list of fasta column names to pass to the dataframe
fastapd = pd.read_csv(fasta_file_to_write, names=fasta_column_names, engine = "python", header=None) #Creating a Pandas dataframe from the fasta output csv
SeqID_list = fastapd["SeqID"].tolist() #Saving contents of the SeqID column to a list
fasta_row_number = len(fastapd) #Counting the number of rows in the fasta dataframe for the statistics output
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the fasta is " + str(fasta_row_number) + "\n")
#Start orthopd
print("Working on ProteinOrtho dataframe")
orthopd = pd.read_csv(ortho_input, sep='\t', engine="python", na_values="*") #Creates a Pandas dataframe from ProteinOrtho input csv
ortho_column_names = list(orthopd.columns)
#Defining the SeqID column
if ortho_input_file_name != "":
orthopd.columns = ["SeqID" if col.startswith(ortho_input_file_name) else col for col in orthopd.columns] #Renaming the fasta input column in ProteinOrtho dataframe to SeqID to match other dataframes
else: pass
#Attempting to identify which column corresponds to the input fasta
fasta_input_split = fasta_input.split('.', 1)[0] #Stripping the file extension from the fasta input name in case it was .fasta versus .faa, etc
orthopd_pruned = orthopd.drop(columns=['# Species', 'Genes', 'Alg.-Conn.']) #Creating a new dataframe without the first three columns which will always have data in each row in order to id longest column
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Checking to see if fasta input file name is in the ProteinOrtho column name list
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Trying to find fasta file in ProteinOrtho file through other means")
orthopd.columns = ["SeqID" if col.startswith(fasta_input_split) else col for col in orthopd.columns] #Using the input fasta file name as a guess for the faa file name
if orthopd.columns.astype(str).str.contains("SeqID").any(): #Breaks loops if the column name has been found/replaced
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Attempting another way of identifying fasta file column. This may take some time")
orthopd_fasta_column_name = orthopd_pruned.count().idxmax() #Finding column with the least number of NaN which is likely the input fasta
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the putative SeqID ProteinOrtho column
if orthopd[orthopd_fasta_column_name].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(orthopd_fasta_column_name, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
else:
print("Final method to identify fasta file column. This may take hours")
orthopd = orthopd.drop(orthopd[(orthopd['Genes'] == 1)].index) #Gets rid of rows with just a single gene found in order to speed up full frame search
for l in SeqID_list: #Searching to see if any values from the fastapd SeqID column (l) are in the ProteinOrtho dataframe
for i in orthopd.columns:
if orthopd[i].astype(str).str.contains(l).any():
orthopd.rename(columns=lambda x: x.replace(i, "SeqID"), inplace=True) #Renaming the ProteinOrtho column with fasta sequence names as SeqID
break
orthopd = orthopd.drop(orthopd[(orthopd['SeqID'].isna())].index)#Removing SeqID rows with NaN
#Splitting the duplicated entries in the SeqID column and making new rows with a SeqID member on each but with same data otherwise
def pir2(df, c): #Defining function to split the SeqID column at each comma and place one of each split value onto a new, otherwise duplicated row
colc = df[c].astype(str).str.split(',')
clst = colc.values.astype(object).tolist()
lens = [len(l) for l in clst]
j = df.columns.get_loc(c)
v = df.values
n, m = v.shape
r = np.arange(n).repeat(lens)
return pd.DataFrame(
np.column_stack([v[r, 0:j], np.concatenate(clst), v[r, j+1:]]),
columns=orthopd.columns
)
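#Illustrative sketch of what pir2 does (comments only, since pir2 hardcodes columns=orthopd.columns and is meant to be run on orthopd itself):
#a row whose SeqID is "gene1,gene2" becomes two rows with SeqID "gene1" and "gene2", each keeping the same values in every other column.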
orthopd3 = pir2(orthopd, "SeqID") #Running column split function on the SeqID column on orthopd
print("Beginning data analysis on the ProteinOrtho dataframe")
#Graph Algebraic Connectivity
orthopd_algconn_nozero = orthopd3[orthopd3['Alg.-Conn.'] != 0] #Removing zero algebraic connectivity values from the ProteinOrtho dataframe for graphing
orthopd_algconn_noone = orthopd_algconn_nozero[orthopd_algconn_nozero['Alg.-Conn.'] != 1] #Also removing values equal to one for the second graph
orthopd_algconn_noone['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity without Unity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph_noone.png")#Saving graph to file
plt.clf()
orthopd_algconn_nozero['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph.png")#Saving graph to file
plt.clf()
#Graph Gene Counts
orthopd_gene_count_values = orthopd3['Genes'].value_counts() #Getting the count of each value in the Genes column of the ProteinOrtho dataframe
orthopd_gene_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Graph of Gene Counts')
plt.xlabel('Number of Shared transcripts')
plt.ylabel('Number of Genes with same frequency')
plt.tight_layout()
plt.savefig("ProteinOrtho_gene_graph.png")#Saving graph to file
plt.clf()
#Start gff3pd
print("Working on gff3 dataframe")
gff3pd_column_names = ['SeqID', 'Database', 'Match type', 'Start', 'Stop', 'Score', 'Strand', 'Phase', 'Match information'] #Renaming static gff3 columns
statsgff3pd = pd.read_csv(gff3_statsfile_to_write, sep='\t', names=gff3pd_column_names, header=None, engine="python") #Creating a dataframe for gff3 stats
gff3pd_original_row_number = len(statsgff3pd) #Counting the number of rows in the original gff3pd dataframe for the statistics output
with open(zorbit_statistics, 'a') as f: #Writing the number of rows in the original gff3pd dataframe to the statistics output
f.write("The number of sequences in the original gff3 file is " + str(gff3pd_original_row_number) + "\n")
gff3pd = pd.read_csv(gff3_file_to_write, sep='\t', names=gff3pd_column_names, header=None, engine = "python") #Creating a Pandas dataframe from the gff3 output csv
gff3pd_row_number = len(gff3pd) #Counting the number of rows in the final gff3 file dataframe for the statistics output
gff3pd_max_score = gff3pd['Score'].max() #Finding maximum value in Score column of gff3 dataframe
gff3pd_without_null = gff3pd[gff3pd['Score'] != "."] #Removing null ('.') Score entries from the gff3 dataframe
gff3pd_without_null_or_zero = gff3pd_without_null[gff3pd_without_null['Score'] != 0.0]
gff3pd_min_score = gff3pd_without_null_or_zero['Score'].min()
statsgff3pd_without_null = statsgff3pd[statsgff3pd['Score'] != "."]
statsgff3pd_max_score = statsgff3pd_without_null['Score'].max()
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the gff3 file after removal of MobiDBLite and duplicates is " + str(gff3pd_row_number) + "\n") #Adding cleaned gff3 stastitics to file
f.write("The range of quality scores for the gff3 file range from " + str(gff3pd_min_score) + " to " + str(gff3pd_max_score) + "\n")#Adding range of scores to statistics file
f.write("The maximum quality score for the original gff3 file is " + str(statsgff3pd_max_score) + "\n")
#Graph database distribution
gff3pd_database_count_values = gff3pd['Database'].value_counts() #Getting the count of each database in the gff3 dataframe
gff3pd_database_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Distribution of Database hits')
plt.xlabel('Database name')
plt.ylabel('Number of Database hits')
plt.tight_layout()
plt.savefig("Gff3_database_graph.png")#Saving graph to file
plt.clf()
#Preparing dataframes for merging
print("Preparing dataframes for merge")
gff3pd['SeqID'] = gff3pd['SeqID'].astype(str) #Setting column type as string
orthopd3['SeqID'] = orthopd3['SeqID'].astype(str) #Setting column type as string
fastapd['SeqID'] = fastapd['SeqID'].astype(str) #Setting column type as string
#Dealing with fna versus faa
protein_flag = 0
if fasta_type == "nucleotide": #Checking to see if the fasta_type is nucleotide
gff3pd_split = gff3pd['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
gff3pd['SeqID'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
orthopd_split = orthopd3['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
    orthopd3['SeqID'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
    orthopd = orthopd3 #Carrying the split dataframe forward so the merge below uses it, mirroring the protein branch
else:
#Pulling out reading frame information
protein_flag = 1
gff3pd['SeqID2'] = gff3pd['SeqID']
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra number after the fasta SeqID
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column as the split column
gff3pd_split = gff3pd['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
gff3pd['SeqID2'] = gff3pd_split[0] #Setting the gff3 SeqID column
gff3pd['Reading_Frame'] = gff3pd_split[1] #Setting the gff3 Frame column
gff3pd = gff3pd.drop(['SeqID2'], axis=1)
orthopd3['SeqID2'] = orthopd3['SeqID']
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra two numbers after the fasta SeqID to allow match
orthopd3['SeqID2'] = orthopd_split[0] #Setting the ProteinOrtho SeqID column as the split column
orthopd_split = orthopd3['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
orthopd3['SeqID2'] = orthopd_split[0] #Setting the orthopd SeqID column
orthopd3['Reading_Frame'] = orthopd_split[1] #Setting the gff3 Frame column
orthopd = orthopd3.drop(['SeqID2'], axis=1)
#Merging
print("Combining dataframes")
gff3_ortho_merge = pd.merge(orthopd, gff3pd, how='outer', on=['SeqID']) #Merging the ProteinOrtho and interproscan dataframes
all_merge = pd.merge(gff3_ortho_merge, fastapd, how='outer', on=['SeqID']) #Merging the fasta dataframe with the combined ProteinOrtho/Interproscan dataframes
#Adding marks to merged dataframe to make fasta
all_merge['SeqID'] = all_merge['SeqID'].apply(lambda x: f'>{x}') #Placing > at the beginning of each SeqID so rows can later be written out as fasta headers
all_merge['Sequence'] = all_merge['Sequence'].apply(lambda x: f'\n{x}') #Placing a new line before the Sequence data
all_merge = all_merge[ ['SeqID'] + [ col for col in all_merge.columns if col != 'SeqID' ] ] #Moving SeqID to the far left of the dataframe
all_merge = all_merge[ [ col for col in all_merge.columns if col != 'Sequence' ] + ['Sequence'] ] #Moving Sequence to the far right of the dataframe
#Statistics on the merged dataframe
all_merge_both = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] == 1))].index)
all_merge_neither = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] !=1))].index)
all_merge_just_ortho = all_merge.drop(all_merge[((all_merge['Database'].notna()) | (all_merge['Genes'] == 1))].index)
all_merge_just_inter = all_merge.drop(all_merge[((all_merge['Database'].isna()) | (all_merge['Genes'] !=1))].index)
all_merge_all = len(pd.unique(all_merge['SeqID'])) #Calculating the number of unique sequences
all_merge_both = len(pd.unique(all_merge_both['SeqID'])) #Calculating unique sequences with both interproscan and proteinortho hits
all_merge_neither = len(pd.unique(all_merge_neither['SeqID'])) #Calculating unique sequences without interproscan or proteinortho hits
all_merge_just_ortho = len(pd.unique(all_merge_just_ortho['SeqID'])) #Calculating unique sequences with proteinortho but not interproscan hits
all_merge_just_inter = len( | pd.unique(all_merge_just_inter['SeqID']) | pandas.unique |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import pandas as pd
import numpy as np
from lux.utils import date_utils
from lux.executor.PandasExecutor import PandasExecutor
def test_dateformatter():
ldf = pd.read_csv("lux/data/car.csv")
# change pandas dtype for the column "Year" to datetype
ldf["Year"] = pd.to_datetime(ldf["Year"], format="%Y")
timestamp = np.datetime64("2019-08-26")
ldf.maintain_metadata()
assert date_utils.date_formatter(timestamp, ldf) == "2019"
ldf["Year"][0] = np.datetime64("1970-03-01") # make month non unique
assert date_utils.date_formatter(timestamp, ldf) == "2019-8"
ldf["Year"][0] = np.datetime64("1970-03-03") # make day non unique
assert date_utils.date_formatter(timestamp, ldf) == "2019-8-26"
def test_period_selection():
ldf = pd.read_csv("lux/data/car.csv")
ldf["Year"] = pd.to_datetime(ldf["Year"], format="%Y")
ldf["Year"] = pd.DatetimeIndex(ldf["Year"]).to_period(freq="A")
ldf.set_intent(
[
lux.Clause(attribute=["Horsepower", "Weight", "Acceleration"]),
lux.Clause(attribute="Year"),
]
)
PandasExecutor.execute(ldf.current_vis, ldf)
assert all([type(vlist.data) == lux.core.frame.LuxDataFrame for vlist in ldf.current_vis])
assert all(ldf.current_vis[2].data.columns == ["Year", "Acceleration"])
def test_period_filter():
ldf = pd.read_csv("lux/data/car.csv")
ldf["Year"] = pd.to_datetime(ldf["Year"], format="%Y")
ldf["Year"] = | pd.DatetimeIndex(ldf["Year"]) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 13:08:45 2021
@author: MalvikaS
Build classifier on RNA seq data
"""
# Import
import os
import pandas as pd
import glob
import random
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.ensemble import EasyEnsembleClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, precision_score
from sklearn.metrics import recall_score
import pickle
import snv  # project module providing the SNV labelling helpers (getCivicLabels etc.) used in getRNA_X; assumed importable here
def getNeighFeat(data_rna, G, neighbors, norm = False):
"""
Function for getting neighbourhood features. Calculates the sum of logFC
across the different neighbours of the gene.
    To normalize for the number of neighbours with a change in fold change (FC), the
    norm parameter needs to be set to True.
Function considers all genes as neighbours as passed in arguments. These
neighbours are assumed to be n hops away.
Parameters
----------
data_rna : DataFrame
DataFrame containing the fold change data. Should contain column
"logFC".
G : networkx graph object
Undirected graph of protein protein interactions.
neighbors : dict
Dictionary with gene as keys and list of neighbours as value.
norm : bool, optional
Whether to normalise for neighbour up or down-regulated. The default
is False.
Returns
-------
feat : list
list of values for each gene.
"""
# Get sum of neighbour FC
feat = []
for gene in data_rna.genes:
val = 0
count = 0
if gene in G.nodes:
for neigh in neighbors[gene]:
if neigh in data_rna.genes and abs(data_rna.loc[neigh, "logFC"]) > 2:
val = val + abs(data_rna.loc[neigh, "logFC"])
count = count + 1
if norm and count != 0:
feat.append(val / count)
else:
feat.append(val)
return feat
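# Minimal usage sketch for getNeighFeat (illustrative only: the toy graph, gene names and logFC
# values below are made up, and networkx is assumed, consistent with the pickled graph loaded in
# getRNAFeatures). Guarded so it does not run when this module is imported.
if __name__ == "__main__":
    import networkx as nx
    toy_G = nx.Graph([("geneA", "geneB"), ("geneB", "geneC")])
    toy_neighbors = {n: list(toy_G.neighbors(n)) for n in toy_G.nodes}
    toy_rna = pd.DataFrame({"genes": ["geneA", "geneB", "geneC"],
                            "logFC": [3.0, -2.5, 0.1]},
                           index=["geneA", "geneB", "geneC"])
    # geneA's only neighbour (geneB, |logFC| = 2.5) passes the > 2 cutoff -> 2.5
    # geneB's neighbours: geneA passes (3.0), geneC (0.1) does not -> 3.0
    # geneC's only neighbour (geneB) passes -> 2.5
    print(getNeighFeat(toy_rna, toy_G, toy_neighbors, norm=True))  # [2.5, 3.0, 2.5]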
def getRNAFeatures(datapath, file, ctype, path_network, n = 1):
"""
    Function for generating RNA features using edgeR results as well as the network.
Parameters
----------
datapath : str
Complete folder path where RNA processed files are saved. Each file
contains DEGs for each patient. Must contain columns "genes", 'logFC'.
file : str
File name to be read and features generated. A default ".tsv" is
included to the file name.
ctype : str
Cancer-type.
path_network : str
Complete path to where network data is stored as pkl files.
n : int, optional
Number hops to be considered while defining neighbours.
The default is 1.
Returns
-------
DataFrame
DataFrame containing all the RNA features to be used for model building.
"""
# Load network
os.chdir(path_network)
with open("string_graph.pkl", "rb") as f:
G = pickle.load(f)
with open("string_degree.pkl", "rb") as f:
deg = pickle.load(f)
with open("string_bc.pkl", "rb") as f:
bc = pickle.load(f)
with open("string_cc.pkl", "rb") as f:
cc = pickle.load(f)
with open("string_neigh_{}.pkl".format(n), "rb") as f:
neighbors = pickle.load(f)
# Load RNA data
# Get sample name
samp = file
# Read RNA file
os.chdir(datapath)
data_rna = pd.read_csv(file+".tsv", header=0, index_col=0, sep="\t")
# Get degree
temp =[deg[gene] if gene in G.nodes else 0 for gene in data_rna.genes]
data_rna["Degree"] = temp
# Get closeness centrality
temp =[cc[gene] if gene in G.nodes else 0 for gene in data_rna.genes]
data_rna["Closeness_centrality"] = temp
# Get betweeness centrality
temp =[bc[gene] if gene in G.nodes else 0 for gene in data_rna.genes]
data_rna["Betweeness_centrality"] = temp
# Get FC x degree
temp =[fc * d if abs(fc) >2 else 0 for fc, d in zip(data_rna.logFC, data_rna.Degree)]
data_rna["FC_Degree"] = temp
# Get FC x Closeness_centrality
temp =[fc * c if abs(fc) >2 else 0 for fc, c in zip(data_rna.logFC, data_rna.Closeness_centrality)]
data_rna["FC_Closeness_centrality"] = temp
# Get FC x Betweeness_centrality
temp =[fc * b if abs(fc) >2 else 0 for fc, b in zip(data_rna.logFC, data_rna.Betweeness_centrality)]
data_rna["FC_Betweeness_centrality"] = temp
# Get sum of FC of neighbours
data_rna["neigh_FC"] = getNeighFeat(data_rna, G, neighbors, norm = False)
# Get normalized sum of FC of neighbours
data_rna["neigh_normFC"] = getNeighFeat(data_rna, G, neighbors, norm = True)
# Assign indices
data_rna.index = ["{};{}".format(samp, gene) for gene in data_rna.genes]
data_rna["Tumor_Sample_Barcode"] = [samp] * len(data_rna)
return data_rna
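#Illustrative call for getRNAFeatures (comments only, since it needs the pickled STRING network files
#and a per-sample edgeR table "<sample>.tsv" on disk; the paths and sample name below are placeholders):
# feats = getRNAFeatures(datapath="/path/to/GDC_BRCA/RNA-seq", file="TCGA-XX-XXXX-01A",
#                        ctype="BRCA", path_network="/path/to/network", n=1)
#The returned frame carries Degree, Closeness_centrality, Betweeness_centrality, the FC_* interaction
#terms and the neigh_FC / neigh_normFC sums that getRNA_X concatenates across samples.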
def getRNA_X(sample_list, DATAPATH, ctype, lab_type):
"""
Get X for RNA. The required columns are retained and all other rows and
columns dropped. This function also labels the data for building models.
Parameters
----------
sample_list : list
List of tumour samples to be retained.
DATAPATH : str
Complete path to SNV data for the samples and other data for different
        labelling techniques.
ctype : str
Cancer-type.
lab_type : str
        Labelling strategy to be used.
Returns
-------
data : DataFrame
DataFrame containing feature matrix to be trained on and labels.
data_meta : DataFrame
        DataFrame containing metadata for the feature matrix.
"""
# Load SNV data (for labelling)
os.chdir(DATAPATH + "/GDC_{}/SNV".format(ctype))
fname="{}_snv.tsv".format(ctype)
snv_lab = pd.read_csv(fname, sep="\t", header=0)
snv_lab.Tumor_Sample_Barcode = [samp[:16] for samp in snv_lab.Tumor_Sample_Barcode]
snv_lab = snv_lab[snv_lab.Tumor_Sample_Barcode.isin(sample_list)]
snv_lab.index = ["{};{}".format(samp[:16], gene) for samp, gene in zip(snv_lab.Tumor_Sample_Barcode, snv_lab.Hugo_Symbol)]
# Add labels
if lab_type == "civic":
snv_lab = snv.getCivicLabels(snv_lab, DATAPATH)
if lab_type == "martellotto":
snv_lab = snv.getMartelottoLabels(snv_lab, DATAPATH)
if lab_type == "cgc":
snv_lab = snv.getCGCLabels(snv_lab, DATAPATH)
if lab_type == "bailey":
snv_lab = snv.getBaileyLabels(snv_lab, DATAPATH, ctype)
# Remove duplicates and keep labelled data_snp
snv_lab = snv_lab[snv_lab.Label != "Unlabelled"]
snv_lab = snv_lab[~snv_lab.index.duplicated()]
# load data
path_network = DATAPATH + "/network"
data = [None] * len(sample_list)
datapath = DATAPATH + "/GDC_{}/RNA-seq".format(ctype)
for idx, file in enumerate(sample_list):
temp = getRNAFeatures(datapath, file, ctype, path_network, n=1)
# Assign labels to RNA data
temp["Label"] = [snv_lab.loc[idx, "Label"] if idx in snv_lab.index else "Unlabelled" for idx in temp.index]
temp = temp[temp["Label"] != "Unlabelled"]
# Drop nan rows
data[idx] = temp.dropna(axis=0)
# Concat data
data = | pd.concat(data) | pandas.concat |
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
def process_song_file(cur, filepath):
"""Reads songs log file row by row, selects needed fields and inserts them into song and artist tables.
Parameters:
cur (psycopg2.cursor()): Cursor of the sparkifydb database
filepath (str): Filepath of the file to be analyzed
"""
# open song file
df = pd.read_json(filepath, lines=True)
for value in df.values:
artist_id, artist_latitude, artist_location, artist_longitude, artist_name, duration, num_songs, song_id, title, year = value
# insert artist record
artist_data = [artist_id, artist_name, artist_location, artist_longitude, artist_latitude]
cur.execute(artist_table_insert, artist_data)
# insert song record
song_data = [song_id, title, artist_id, year, duration]
cur.execute(song_table_insert, song_data)
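# Example of how this function is typically driven (illustrative; the connection string and the
# JSON path below are placeholders, not values defined in this script):
# conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
# cur = conn.cursor()
# process_song_file(cur, "data/song_data/.../some_song_file.json")
# conn.commit()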
def process_log_file(cur, filepath):
"""Reads user activity log file row by row, filters by NexSong, selects needed fields, transforms them and inserts
them into time, user and songplay tables.
Parameters:
cur (psycopg2.cursor()): Cursor of the sparkifydb database
filepath (str): Filepath of the file to be analyzed
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df = df[df['page']=='NextSong']
# convert timestamp column to datetime
t = | pd.to_datetime(df['ts'], unit='ms') | pandas.to_datetime |
#!/usr/bin/env python
import argparse
import pandas as pd
import re
import sys
import collections
#Read arguments
parser = argparse.ArgumentParser(description="Generate input for exint plotter")
parser.add_argument("--annotation", "-a", required=True)
parser.add_argument("--overlap", "-o", required=True)
parser.add_argument("--gene_clusters", "-c", required=True)
parser.add_argument("--ex_clusters", "-e", required=True)
parser.add_argument("--ref_prots", "-r", required=True)
parser.add_argument("--species", "-s", required=True)
parser.add_argument("--output_file", "-out", required=True)
args = parser.parse_args()
my_annot_file = args.annotation
my_overlap_file = args.overlap
my_exon_clusters = args.ex_clusters
my_gene_clusters_file = args.gene_clusters
my_ref_prots_file = args.ref_prots
my_species = args.species
my_output_file = args.output_file
################ OVERLAPPING EXONS INFO #####################
my_overlap_df = pd.read_table(my_overlap_file, sep="\t", header=None, names=["ExOverlapID", "GeneID", "ExCoords","Strand"])
my_gtf = pd.read_table(my_annot_file, sep="\t", header=None)
#Exit if gtf does not have the expected number of fields.
if my_gtf.shape[1] != 9:
sys.exit("GTF does not have the expected number of fields")
#rename GTF entries.
my_gtf = my_gtf.rename(columns={0:"Chr", 1:"Source", 2:"Feature", 3:"Start", 4:"Stop", 5:"Score", 6:"Strand", 7:"Phase", 8:"Info"})
#subset GTF to only entries of CDS exons (which automatically have the ProteinID)
#my_gtf = my_gtf.loc[my_gtf["Info"].str.contains("protein_id")]
my_gtf = my_gtf.loc[my_gtf["Feature"]=="CDS"]
#add extra into (GeneID, ProteinID, ExNum) as GTF Separate columns
my_gtf_subset = my_gtf["Info"]
my_raw_gene_id = [part for element in list(my_gtf_subset) for part in element.split(";") if "gene_id" in part]
my_gtf["GeneID"] = [re.sub(".*[ ]", "", re.sub('"', "", element)) for element in my_raw_gene_id]
# Select the first subfield containing the protein ID. Useful in case of weird GTF structure
#protein_id_subfield = list(my_gtf_subset)[0].split(";").index([element for element in list(my_gtf_subset)[0].split(";") if "protein_id" in element][0])
#my_raw_prot_id = [element.split(";")[protein_id_subfield] for element in list(my_gtf_subset)]
my_raw_prot_id = [part for element in list(my_gtf_subset) for part in element.split(";") if "protein_id" in part]
my_gtf["ProteinID"] = [re.sub(".*[ ]", "", re.sub('"', "", element)) for element in my_raw_prot_id]
#The transcriptID will be used to derive the annotation status
my_raw_transcriptID = [part for element in list(my_gtf_subset) for part in element.split(";") if "transcript_id" in part] #select transcriptID
my_gtf["TranscriptID"] = [re.sub(".*[ ]", "", re.sub('"', "", element)) for element in my_raw_transcriptID]
#The exon number is useful to define the relative position.
my_raw_exon_num = [part for element in list(my_gtf_subset) for part in element.split(";") if "exon_number" in part]
my_exon_num = [re.sub(".*[ ]", "", re.sub('"', "", element)) for element in my_raw_exon_num]
my_gtf["ExNum"] = my_exon_num
#Remove genes with exons annotated on different strands (very weird cases)
geneID_strand_df = my_gtf.loc[:,["Strand","GeneID"]].drop_duplicates() #If a gene has exons annotated on both strands, the geneID will be duplicated.
selected_geneIDs = [item for item, count in collections.Counter(list(geneID_strand_df["GeneID"])).items() if count == 1]
my_gtf = my_gtf.loc[my_gtf["GeneID"].isin(selected_geneIDs)]
#Add coordinates to GTF
#my_gtf["Coords"] = [str(element)+"-"+str(element1) for element, element1 in zip(list(my_gtf["Start"]), list(my_gtf["Stop"]))]
my_gtf["Coords"] = [str(element)+":"+str(element1)+"-"+str(element2) for element, element1, element2 in zip(list(my_gtf["Chr"]), list(my_gtf["Start"]), list(my_gtf["Stop"]))]
#Add chr to start-stop coords in the overlapping group GTF
geneID_chr_dict = pd.Series(my_gtf.Chr.values, index=my_gtf.GeneID).to_dict() #the duplicated keys are automatically overwritten
my_overlap_df["ExonID"] = [str(element)+":"+str(element1) for element, element1 in zip(list(my_overlap_df["GeneID"].map(geneID_chr_dict)), list(my_overlap_df["ExCoords"]))]
#Add frequency and exon length
my_gtf_exons = my_gtf.loc[my_gtf.Feature=="CDS"]
my_exon_freq_dict = {key : value for key, value in collections.Counter(list(my_gtf_exons["Coords"])).items()} #Create a dictionary with key=coords, value=freq
my_overlap_df["Freq"] = my_overlap_df["ExonID"].map(my_exon_freq_dict).fillna(0) #add frequency
my_overlap_df["Length"] = [int(re.sub(".*-", "",element))-int(re.sub(".*:", "", re.sub("-.*", "", element))) for element in list(my_overlap_df["ExonID"])] #add exon lenght
#Put a filter on the Freq: I think for now it is necessary because we don't have the exons from the FakeTranscripts (thus, there are exons from the clusters which have frequency 0).
my_overlap_df = my_overlap_df.loc[my_overlap_df.Freq > 0]
my_overlap_df = my_overlap_df[["GeneID", "ExOverlapID", "ExonID", "Freq", "Length"]] #Order df
################## SELECT OVERLAPPING EXONS #####################
my_overlap_df = my_overlap_df.fillna(0)
#select exons from exon clusters
#header: ExCluster_ID, GeneID, Coordinate, Species, Membership_score
exon_clusters_df = pd.read_table(str(my_exon_clusters), header=0, sep="\t")
exons_in_clusters = [re.sub(":-", "", re.sub(":\+", "", element)) for element in list(exon_clusters_df["Coordinate"])]
my_selected_overlap_df = pd.DataFrame(columns=["GeneID", "ExOverlapID", "ExonID", "Freq", "Length"])
#group by overlap ID
my_grouped_overlap_df = my_overlap_df.groupby("ExOverlapID")
for name, group in my_grouped_overlap_df:
all_exs = list(group["ExonID"])
my_ex = [ex for ex in all_exs if ex in exons_in_clusters] #select the exon in the exon clusters for each overalpping group (there should be only one).
if len(my_ex) == 1:
selected_elements_df = group.loc[group.ExonID==my_ex[0]]
else: #if none of the exons in the overlapping group make it to the exon clusters, select the most frequent form.
all_freq_list = list(group["Freq"])
max_freq = max(all_freq_list)
selected_elements_df = group.loc[group.Freq==max_freq]
if selected_elements_df.shape[0] > 1: #if there are some forms with equal frequency, select the longest.
selected_elements_df = selected_elements_df.loc[selected_elements_df.Length==max(list(selected_elements_df["Length"]))]
#header: ["GeneID", "ExOverlapID", "ExonID", "Freq", "Length"]
my_selected_overlap_df = my_selected_overlap_df.append(selected_elements_df, ignore_index=True) #add the selected element to the final dataframe.
#Print out Coords - Overlapping chosen coords file. This will be used to translate the scores from the best-hits
my_overlapID_chosenID_df = my_overlap_df.loc[:,["ExonID", "ExOverlapID"]]
#Create an ExOverlapID - ChosenID dictionary
overlapID_chosenID_dict = pd.Series(my_selected_overlap_df.ExonID.values, index=my_selected_overlap_df.ExOverlapID).to_dict()
my_overlapID_chosenID_df["ExOverlapID"] = my_overlapID_chosenID_df["ExOverlapID"].map(overlapID_chosenID_dict)
my_overlapID_chosenID_df = my_overlapID_chosenID_df.rename(columns={"ExOverlapID" : "ChosenID"})
my_overlapID_chosenID_df.to_csv(my_species+"_overlapID_chosenID.txt", sep="\t", header=True, index=False, na_rep="NA") #save to file
################## ISOLATE REF PROTEIN EXONS PHASES #####################
my_ex_int_num_df = pd.read_table(my_ref_prots_file, sep="\t", header=None, names=["GeneID", "RefProt"])
ref_proteins_list = list(my_ex_int_num_df["RefProt"])
#ref_proteins_list = [re.sub("\\|.*", "", element) for element in list(my_ex_int_num_df["RefProt"])]
my_ref_gtf = my_gtf.loc[my_gtf["ProteinID"].isin(ref_proteins_list)]
my_ref_phases_df = pd.concat([my_ref_gtf["Coords"], pd.Series(list(my_ref_gtf.iloc[:,7]))], axis=1) #get a dataframe with exonID, RefExonPhase
################## ISOLATE ALL EXONS PHASES #####################
my_all_phases_df = my_gtf.loc[:,["Coords", "Phase"]].drop_duplicates()
my_unique_coords = [key for key, value in collections.Counter(list(my_all_phases_df["Coords"])).items() if value == 1] #exons in the same phase across all isoforms
my_duplicated_coords = [key for key, value in collections.Counter(list(my_all_phases_df["Coords"])).items() if value > 1] #exons in different phases across isoforms
my_duplicated_phases = my_ref_phases_df.loc[my_ref_phases_df["Coords"].isin(my_duplicated_coords)] #select the reference phase for the exons annotated with differnet phases.
my_unique_phases = my_all_phases_df.loc[my_all_phases_df["Coords"].isin(my_unique_coords)]
my_final_phases = pd.concat([my_duplicated_phases, my_unique_phases]).sort_values(by=["Coords"])
my_final_phases.to_csv(my_output_file, sep="\t", header=False, index=False, na_rep="NA")
################## ADD STRAND AND PHASES ################
my_strand_df = my_gtf.loc[:,["Coords","Strand"]].drop_duplicates() #select only coords and strand
#Create a dictionary with key=Coords, value=strand
my_coords_strand_dict = pd.Series(my_strand_df.Strand.values, index=my_strand_df.Coords).to_dict()
my_selected_overlap_df["Strand"] = my_selected_overlap_df["ExonID"].map(my_coords_strand_dict)
#Create a dictionary with key=Coords, value=phase
my_coords_phase_dict = | pd.Series(my_final_phases.Phase.values, index=my_final_phases.Coords) | pandas.Series |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
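# Positional element access (nth, nth_index) and pandas-like summary statistics checked against to_pd().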
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth_index(0) == 'x'
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth_index(-1) == 'z'
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
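# value_counts(): per-column and per-group counts, optional value-to-label mapping,
# sorting, normalization, and NaN handling.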
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
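# Column and group selection via indexing, parametrized over the sorted and unsorted fixtures.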
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
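# Magic methods: comparison, arithmetic, reflected, logical, and unary operators act element-wise
# on the underlying values.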
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
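# stats(): mean-aggregated summary across columns, per-column and per-group variants,
# and the raw per-column DataFrame via agg_func=None.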
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
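# stats() with a value mapping reports per-value counts instead of the numeric summary.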
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
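# Tests for vbt.Records: (de)serialization, readable views, sorting, masking, field mapping/apply,
# counting, and indexing.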
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
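# Filtering records with a boolean mask keeps only matching rows and preserves the (grouped) wrapper.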
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
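# apply(): record-wise transformation with a Numba function, per column or per group.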
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
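# Column and group selection on records, parametrized over the sorted and unsorted fixtures.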
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)