prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
plt.rcParams['font.size'] = 6
root_path = os.path.dirname(os.path.abspath('__file__'))  # '__file__' as a string literal: this resolves to the current working directory
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
graphs_path = root_path+'/graphs/'
results_path = root_path+'/results_analysis/results/'
print("root path:{}".format(root_path))
sys.path.append(root_path)
from tools.results_reader import read_two_stage, read_pure_esvr,read_pure_arma
h_arma = read_pure_arma("Huaxian")
x_arma = read_pure_arma("Xianyang")
z_arma = read_pure_arma("Zhangjiashan")
h_svr_1 = pd.read_csv(root_path+'/Huaxian/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_3 = pd.read_csv(root_path+'/Huaxian/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_5 = pd.read_csv(root_path+'/Huaxian/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
h_svr_7 = pd.read_csv(root_path+'/Huaxian/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_1 = pd.read_csv(root_path+'/Xianyang/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_3 = pd.read_csv(root_path+'/Xianyang/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_5 = pd.read_csv(root_path+'/Xianyang/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
x_svr_7 = pd.read_csv(root_path+'/Xianyang/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/1_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/3_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/5_ahead_pacf_lag12/optimal_model_results.csv')
z_svr_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/esvr/7_ahead_pacf_lag12/optimal_model_results.csv')
h_lstm_1 = pd.read_csv(root_path+'/Huaxian/projects/lstm/1_ahead/optimal/opt_pred.csv')
h_lstm_3 = pd.read_csv(root_path+'/Huaxian/projects/lstm/3_ahead/optimal/opt_pred.csv')
h_lstm_5 = pd.read_csv(root_path+'/Huaxian/projects/lstm/5_ahead/optimal/opt_pred.csv')
h_lstm_7 = pd.read_csv(root_path+'/Huaxian/projects/lstm/7_ahead/optimal/opt_pred.csv')
x_lstm_1 = pd.read_csv(root_path+'/Xianyang/projects/lstm/1_ahead/optimal/opt_pred.csv')
x_lstm_3 = pd.read_csv(root_path+'/Xianyang/projects/lstm/3_ahead/optimal/opt_pred.csv')
x_lstm_5 = pd.read_csv(root_path+'/Xianyang/projects/lstm/5_ahead/optimal/opt_pred.csv')
x_lstm_7 = pd.read_csv(root_path+'/Xianyang/projects/lstm/7_ahead/optimal/opt_pred.csv')
z_lstm_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/1_ahead/optimal/opt_pred.csv')
z_lstm_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/3_ahead/optimal/opt_pred.csv')
z_lstm_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/5_ahead/optimal/opt_pred.csv')
z_lstm_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/lstm/7_ahead/optimal/opt_pred.csv')
h_dnn_1 = pd.read_csv(root_path+'/Huaxian/projects/dnn/1_ahead/optimal/opt_pred.csv')
h_dnn_3 = pd.read_csv(root_path+'/Huaxian/projects/dnn/3_ahead/optimal/opt_pred.csv')
h_dnn_5 = pd.read_csv(root_path+'/Huaxian/projects/dnn/5_ahead/optimal/opt_pred.csv')
h_dnn_7 = pd.read_csv(root_path+'/Huaxian/projects/dnn/7_ahead/optimal/opt_pred.csv')
x_dnn_1 = pd.read_csv(root_path+'/Xianyang/projects/dnn/1_ahead/optimal/opt_pred.csv')
x_dnn_3 = pd.read_csv(root_path+'/Xianyang/projects/dnn/3_ahead/optimal/opt_pred.csv')
x_dnn_5 = pd.read_csv(root_path+'/Xianyang/projects/dnn/5_ahead/optimal/opt_pred.csv')
x_dnn_7 = pd.read_csv(root_path+'/Xianyang/projects/dnn/7_ahead/optimal/opt_pred.csv')
z_dnn_1 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/1_ahead/optimal/opt_pred.csv')
z_dnn_3 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/3_ahead/optimal/opt_pred.csv')
z_dnn_5 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/5_ahead/optimal/opt_pred.csv')
z_dnn_7 = pd.read_csv(root_path+'/Zhangjiashan/projects/dnn/7_ahead/optimal/opt_pred.csv')
h_d_1 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_d_3 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_d_5 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_d_7 = pd.read_csv(root_path+'/Huaxian_dwt/projects/esvr/db10-2/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_e_1 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_e_3 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_e_5 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_e_7 = pd.read_csv(root_path+'/Huaxian_eemd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_s_1 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_s_3 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_s_5 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_s_7 = pd.read_csv(root_path+'/Huaxian_ssa/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_v_1 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
h_v_3 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
h_v_5 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
h_v_7 = pd.read_csv(root_path+'/Huaxian_vmd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
h_m_1 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_1_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_3 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_3_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_5 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_5_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
h_m_7 = pd.read_csv(root_path+'/Huaxian_modwt/projects/esvr-wddff/db1-4/single_hybrid_7_ahead_lag12_mi_ts0.1/optimal_model_results.csv')
x_d_1 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_d_3 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_d_5 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_d_7 = pd.read_csv(root_path+'/Xianyang_dwt/projects/esvr/db10-2/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_e_1 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_e_3 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_e_5 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_e_7 = pd.read_csv(root_path+'/Xianyang_eemd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_s_1 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_s_3 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_s_5 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_s_7 = pd.read_csv(root_path+'/Xianyang_ssa/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_v_1 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_1_ahead_forecast_pacf/optimal_model_results.csv')
x_v_3 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_3_ahead_forecast_pacf/optimal_model_results.csv')
x_v_5 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_5_ahead_forecast_pacf/optimal_model_results.csv')
x_v_7 = pd.read_csv(root_path+'/Xianyang_vmd/projects/esvr/one_step_7_ahead_forecast_pacf/optimal_model_results.csv')
x_m_1 = | pd.read_csv(root_path+'/Xianyang_modwt/projects/esvr-wddff/db1-4/single_hybrid_1_ahead_lag12_mi_ts0.1/optimal_model_results.csv') | pandas.read_csv |
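# A possible consolidation of the repeated pd.read_csv calls above (a sketch only,
# assuming the same directory layout as the paths shown; `svr_results`, `stations`
# and `leads` are illustrative names that are not part of the original script):
stations = ['Huaxian', 'Xianyang', 'Zhangjiashan']
leads = [1, 3, 5, 7]
svr_results = {
    station: {
        lead: pd.read_csv(root_path + '/{0}/projects/esvr/{1}_ahead_pacf_lag12/optimal_model_results.csv'.format(station, lead))
        for lead in leads
    }
    for station in stations
}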
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
| pd.testing.assert_frame_equal(testData,df,check_dtype=False) | pandas.testing.assert_frame_equal |
import requests
from bs4 import BeautifulSoup
import pandas
def getPageProperties(page):
properties = page.find_all("div", {"class": "propertyRow"})
for property in properties:
propertyData = {}
address = property.find_all("span", {"class": "propAddressCollapse"})
propertyData["Address"] = address[0].text.strip()
try:
propertyData["Locality"] = address[1].text.strip()
except (AttributeError, TypeError):
propertyData["Locality"] = None
propertyData["Price"] = property.find(
"h4", {"class": "propPrice"}).text.strip()
try:
propertyData["Beds"] = property.find("span", {"class": "infoBed"})\
.find('b').text.strip()
except (AttributeError, TypeError):
propertyData["Beds"] = None
try:
propertyData["Area"] = property.find("span", {"class": "infoSqFt"})\
.find('b').text.strip()
except (AttributeError, TypeError):
propertyData["Area"] = None
try:
propertyData["Full Baths"] = property.find("span", {"class": "infoValueFullBath"})\
.find('b').text.strip()
except (AttributeError, TypeError):
propertyData["Full Baths"] = None
try:
propertyData["Half Baths"] = property.find("span", {"class": "infoValueHalfBath"})\
.find('b').text.strip()
except (AttributeError, TypeError):
propertyData["Half Baths"] = None
for feature in property.find_all("div", {"class": "columnGroup"}):
try:
if "Lot Size" in feature.find('span', {"class": "featureGroup"}).text.strip():
propertyData["Lot Size"] = feature.find(
'span', {"class": "featureName"}).text.strip()
except (AttributeError, TypeError):
pass
propertiesData.append(propertyData)
propertiesData = []
base_url = 'http://pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s={}.html'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0', }
request = requests.get(base_url.format(0), headers=headers)
content = request.content
# First Page
soup = BeautifulSoup(content, "html.parser")
getPageProperties(soup)
# get total number of pages
page_nr = int(soup.find_all("a", {"class": "Page"})[-1].text)
# iterate the other pages
for page in range(10, page_nr*10, 10):
request = requests.get(base_url.format(str(page)), headers=headers)
content = request.content
soup = BeautifulSoup(content, "html.parser")
getPageProperties(soup)
df = | pandas.DataFrame(propertiesData) | pandas.DataFrame |
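# A typical follow-up step (an assumption, not part of the original script; the
# output filename is illustrative): persist the scraped rows and preview them.
df.to_csv('rock_springs_properties.csv', index=False)
print(df.head())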
import pandas as pd
import numpy as np
from numpy import mean
from numpy import std
from numpy import NaN
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
#from xgboost import XGBRFRegressor
import lightgbm as lgb
from lightgbm import LGBMRegressor
# https://www.kaggle.com/shreyagopal/suicide-rate-prediction-with-machine-learning
#from sklearn.linear_model import LinearRegression
dat = "C:/Users/LIUM3478/OneDrive Corp/OneDrive - Atkins Ltd/Work_Atkins/Docker/hjulanalys/wheel_prediction_data.csv"
df = pd.read_csv(dat, encoding = 'ISO 8859-1', sep = ";", decimal=",")
df.head()
df.groupby(['Littera','VehicleOperatorName']).size().reset_index().rename(columns={0:'count'})
y = df[['km_till_OMS']].values
X = df[["LeftWheelDiameter", "Littera", "VehicleOperatorName",
"TotalPerformanceSnapshot", "maxTotalPerformanceSnapshot"]]
# X["Littera_Operator"] = X.Littera + " " + X.VehicleOperatorName
# X.drop(["Littera", "VehicleOperatorName"], axis = 1, inplace=True)
# converting object type to category for gradient boosting algorithms
def obj_to_cat(data):
obj_feat = list(data.loc[:, data.dtypes == 'object'].columns.values)
for feature in obj_feat:
data[feature] = | pd.Series(data[feature], dtype="category") | pandas.Series |
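# Illustrative continuation (a sketch, not part of the original script): once the
# object columns of X are converted to category in place, a typical LightGBM
# fit/evaluation might look like this; the hyper-parameters are placeholders.
obj_to_cat(X)  # mutates X: object columns become category dtype
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = LGBMRegressor(n_estimators=200, learning_rate=0.05)
model.fit(X_train, y_train.ravel())
rmse = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
print('RMSE:', rmse)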
import pytest
from pandas.compat import pa_version_under4p0
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
pa = pytest.importorskip("pyarrow", minversion="1.0.1")
def test_arrow_extension_type():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
p1 = ArrowPeriodType("D")
p2 = ArrowPeriodType("D")
p3 = | ArrowPeriodType("M") | pandas.core.arrays._arrow_utils.ArrowPeriodType |
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
Intuition results analyzer
--------------------------
Wraps session results with convenient analyse methods
:copyright (c) 2014 <NAME>
:license: Apache 2.0, see LICENSE for more details.
'''
import pytz
import pandas as pd
import numpy as np
import dna.logging
import dna.debug
import dna.utils
from zipline.data.benchmarks import get_benchmark_returns
from intuition.finance import qstk_get_sharpe_ratio
log = dna.logging.logger(__name__)
class Analyze():
''' Handle backtest results and performance measurements '''
def __init__(self, params, results, metrics, benchmark='^GSPC'):
# NOTE Temporary
# Simulation parameters
self.sim_params = params
# Final risk measurements as returned by the backtester
self.results = results
# Simulation rolling performance
self.metrics = metrics
# Market where we traded
self.benchmark = benchmark
def build_report(self, timestamp='one_month', show=False):
# Get daily, cumulative and not, returns of portfolio and benchmark
# NOTE Temporary fix until intuition is able to get benchmark
# data on live trading
try:
bm_sym = self.benchmark
returns_df = self.get_returns(benchmark=bm_sym)
skip = False
except:
log.warn('unable to get benchmark data on live trading for now')
skip = True
orders = 0
for order in self.results.orders:
orders += len(order)
final_value = self.results.portfolio_value[-1]
report = {
'portfolio': final_value,
'gain': final_value - self.sim_params.capital_base,
'orders': orders,
'pnl_mean': self.results.pnl.mean(),
'pnl_deviation': self.results.pnl.std(),
}
if not skip:
report['portfolio_perfs'] = returns_df['algo_c_return'][-1] * 100.0
report['benchmark_perfs'] = \
returns_df['benchmark_c_return'][-1] * 100.0
perfs = self.overall_metrics(timestamp)
for k, v in perfs.iteritems():
report[k] = v
# Float values for humans
for key, value in report.iteritems():
report[key] = dna.utils.truncate(value, 3)
log.info('generated report', report=report)
if show:
print
print(dna.debug.emphasis(report, align=True))
print
return report
def _to_perf_array(self, timestamp, key, length):
return np.array([self.metrics[timestamp][i][key] for i in length])
def rolling_performances(self, timestamp='one_month'):
''' Filters self.perfs '''
# TODO Study the impact of month choice
# TODO Check timestamp in an enumeration
# TODO Implement other benchmarks for perf computation
# (zipline issue, maybe expected)
if self.metrics:
perfs = {}
length = range(len(self.metrics[timestamp]))
index = self._get_index(self.metrics[timestamp])
perf_keys = self.metrics[timestamp][0].keys()
perf_keys.pop(perf_keys.index('period_label'))
perfs['period'] = np.array(
[pd.datetime.date(date) for date in index])
for key in perf_keys:
perfs[key] = self._to_perf_array(timestamp, key, length)
else:
# TODO Get it from DB if it exists
raise NotImplementedError()
return pd.DataFrame(perfs, index=index)
def overall_metrics(self, timestamp='one_month', metrics=None):
'''
Use zipline results to compute some performance indicators
'''
perfs = dict()
# If no rolling perfs provided, computes it
if metrics is None:
metrics = self.rolling_performances(timestamp=timestamp)
riskfree = np.mean(metrics['treasury_period_return'])
perfs['sharpe'] = qstk_get_sharpe_ratio(
metrics['algorithm_period_return'].values, risk_free=riskfree)
perfs['algorithm_period_return'] = (
((metrics['algorithm_period_return'] + 1).cumprod()) - 1)[-1]
perfs['max_drawdown'] = max(metrics['max_drawdown'])
perfs['algo_volatility'] = np.mean(metrics['algo_volatility'])
perfs['beta'] = np.mean(metrics['beta'])
perfs['alpha'] = np.mean(metrics['alpha'])
perfs['benchmark_period_return'] = (
((metrics['benchmark_period_return'] + 1).cumprod()) - 1)[-1]
return perfs
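# Note: qstk_get_sharpe_ratio is expected to compute a Sharpe ratio of roughly
# (mean(returns) - risk_free) / std(returns), possibly annualized; risk_free here
# is the mean treasury period return over the rolling window.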
def get_returns(self, benchmark=''):
returns = {}
if benchmark:
try:
benchmark_data = (
get_benchmark_returns(benchmark,
self.results.index[0],
self.results.index[-1]))
except Exception as e:
raise KeyError(e)
else:
#TODO Automatic detection given exchange market (on command line) ?
raise NotImplementedError()
# NOTE Could be more efficient. But len(benchmark_data.date) !=
# len(self.results.returns.index). Maybe because of different markets
dates = | pd.DatetimeIndex([d.date for d in benchmark_data]) | pandas.DatetimeIndex |
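# Illustrative usage of the Analyze class above (a sketch, not part of the original
# module; `sim_params`, `backtest_results` and `rolling_metrics` are assumed to come
# from a finished intuition/zipline backtest session):
# analyzer = Analyze(params=sim_params, results=backtest_results, metrics=rolling_metrics)
# report = analyzer.build_report(timestamp='one_month', show=True)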
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
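# Illustrative only (not one of the library's own tests): a minimal example of how a
# composite strategy such as string_patterns_st is consumed via hypothesis' @given.
@settings(deadline=None)
@given(params=string_patterns_st())
def test_string_patterns_st_shape(params):
    seq, pattern, offset = params
    assert isinstance(pattern, str)
    assert 0 <= offset <= len(seq)
    assert all(s is None or isinstance(s, str) for s in seq)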
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string()):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array)
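# For example (illustrative): _fr_series_from_data(["a", None, "b"], "chunked") yields a
# pandas Series backed by fr.FletcherChunkedArray, while any other fletcher_variant value
# yields a Series backed by fr.FletcherContinuousArray.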
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_text_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = | pd.Series(data, dtype=str) | pandas.Series |
import math
from algorithm import heft_time_reservation,heft,greedy,lbck,greedy_time_reservation,get_algorithm_timelist,set_paramerters,greedy_nlook_back,greedy_time_reservation_nlook_back,heft_n_look_back,NSGA_n_look_back
from Dataset import get_connect_task_graph,generate_randomtimeline,get_connect_multiple_task_graph,default_timewindow,lookahead_window_size
import copy
# 'Global View Greedy-Reservation','Global View HEFT','Global View Greedy',"NSGA",
# "Partial View Greedy-Reservation-Improved",'Partial View HEFT-Improved','Partial View Greedy-Improved'
ALGOR_NAME_LIST = [
"Partial View Greedy-Reservation",'Partial View HEFT','Partial View Greedy',"Partial View NSGA"
]
#
#--------------Configure-------------
# task type codes: DNN FLOP = 0, Google = 1
defalut_tasktype = [0,0,0]
defalut_inter_num = 1
defalut_edge_nums = 4
defalut_max_edge_nums = 10
defalut_delta = 0.01
defalut_avaratio = 0.75
defalut_sigma = 0.05
defalut_start_ratio = 0.2
defalut_start_sigma = 0.05
defalut_max_cpu_capbility = 305
defalut_request_number = [200,200,200]
# --------------Metrics---------------
# get the total completion time
def get_max_time(anslist):
max_time = -1
if anslist == 2 or anslist == 3:
return max_time
for tmp in anslist:
tmp.sort(key=lambda x: x[2], reverse=True)
if len(tmp) != 0:
max_time = max(max_time, tmp[0][1])
return max_time
# get the throughput
def get_throught_ratio(anslist,deadline):
max_time = 0
if anslist == 3 or anslist == 2:
return 0
for tmp in anslist:
tmp.sort(key=lambda x: x[2], reverse=True)
if len(tmp) != 0:
max_time = max(max_time, tmp[0][2])
if deadline > max_time:
return 1
else:
return 0
# return max_time
# get algorithm running time
def get_run_time():
import time
# starttime = time.clock()
# heft_time_reservation()
# endtime = time.clock()
# heft_time_reservation_time = endtime - starttime
# starttime = time.clock()
# greedy_time_reservation()
# endtime = time.clock()
# greedy_time_reservation_time = endtime - starttime
# starttime = time.clock()
# heft()
# endtime = time.clock()
# heft_time = endtime - starttime
# starttime = time.clock()
# lbck()
# endtime = time.clock()
# lbck_time = endtime - starttime
# starttime = time.clock()
# greedy()
# endtime = time.clock()
# greedy_time = endtime - starttime
starttime = time.clock()
greedy_time_reservation_nlook_back(lookahead_window_size)
endtime = time.clock()
greedy_time_reservation_nlook_back_time = endtime - starttime
starttime = time.clock()
heft_n_look_back(lookahead_window_size)
endtime = time.clock()
heft_n_look_back_time = endtime - starttime
starttime = time.clock()
greedy_nlook_back(lookahead_window_size)
endtime = time.clock()
greedy_nlook_back_time = endtime - starttime
starttime = time.clock()
NSGA_n_look_back(lookahead_window_size)
endtime = time.clock()
NSGA_nlook_back_time = endtime - starttime
return [greedy_time_reservation_nlook_back_time,heft_n_look_back_time,greedy_nlook_back_time,NSGA_nlook_back_time]
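# Note: time.clock() (used above) was removed in Python 3.8; on Python 3.8+ the
# equivalent measurement would use time.perf_counter() instead.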
def result_ratiocal(result_dict_list,inter_num):
avg_dict = {}
rangelen = len(result_dict_list[0][ALGOR_NAME_LIST[0]])
for i in range(len(ALGOR_NAME_LIST)):
avg_dict[ALGOR_NAME_LIST[i]] = [0 for j in range(rangelen)]
# avg_time_dict = time_dict_list[0]
for i in range(len(result_dict_list)):
for key in avg_dict.keys():
for j in range(len(avg_dict[key])):
avg_dict[key][j] += result_dict_list[i][key][j][0]
for key in avg_dict.keys():
for j in range(len(avg_dict[key])):
avg_dict[key][j] /= inter_num
return avg_dict
def result_timecal(result_dict_list,inter_num):
avg_dict = {}
tmp_dict = {}
rangelen = len(result_dict_list[0][ALGOR_NAME_LIST[0]])
for i in range(len(ALGOR_NAME_LIST)):
tmp_dict[ALGOR_NAME_LIST[i]] = [[] for j in range(rangelen)]
# avg_time_dict = time_dict_list[0]
for i in range(len(result_dict_list)):
for key in tmp_dict.keys():
for j in range(len(result_dict_list[i][key])):
if result_dict_list[i][key][j][0] != -1:
tmp_dict[key][j].append(result_dict_list[i][key][j][0])
for i in range(len(ALGOR_NAME_LIST)):
avg_dict[ALGOR_NAME_LIST[i]] = [0 for j in range(len(tmp_dict[ALGOR_NAME_LIST[i]]))]
for i in range(len(ALGOR_NAME_LIST)):
for j in range(len(tmp_dict[ALGOR_NAME_LIST[i]])):
avg_dict[ALGOR_NAME_LIST[i]][j] = sum(tmp_dict[ALGOR_NAME_LIST[i]][j])
for key in avg_dict.keys():
for j in range(len(avg_dict[key])):
if len(tmp_dict[key][j]) != 0:
avg_dict[key][j] /= len(tmp_dict[key][j])
else:
avg_dict[key][j] = -1
return avg_dict
# --------------Metrics---------------
def taskgraph_exp(data_prefix, taskgraph,**kwargs):
# from code02 import set_paramerters,get_time_list
import pandas as pd
avatimelist = []
avatime_ratio = defalut_avaratio
edge_computer_cability = []
resouce_upbound = []
time_list = [[] for i in range(len(ALGOR_NAME_LIST))]
ratio_list = [[] for i in range(len(ALGOR_NAME_LIST))]
request_number = defalut_request_number
# n_look = 30
# file_prefix = 'exp1_edge_num_change'
max_edge_num = defalut_max_edge_nums
edge_nums = defalut_edge_nums
max_cpu_capbility = defalut_max_cpu_capbility
delta = defalut_delta
mu = defalut_avaratio
ratio_sigma = defalut_sigma
window_size = default_timewindow
start_ratio = defalut_start_ratio
start_sigma = defalut_start_sigma
change_edge_num = True
if "ava_ratio" in kwargs:
avatime_ratio = kwargs['ava_ratio']
if 'max_edge_num' in kwargs:
max_edge_num = kwargs['max_edge_num']
if 'change_edge_num' in kwargs:
change_edge_num = kwargs['change_edge_num']
if 'max_cpu_capbility' in kwargs:
max_cpu_capbility = kwargs['max_cpu_capbility']
if 'decision_time_list' in kwargs:
decision_time_list = kwargs['decision_time_list']
if 'delta' in kwargs:
delta = kwargs['delta']
if 'mu' in kwargs:
mu = kwargs['mu']
if 'ratio_sigma' in kwargs:
ratio_sigma = kwargs['ratio_sigma']
if 'request_number' in kwargs:
request_number = kwargs['request_number']
if 'start_ratio' in kwargs:
start_ratio = kwargs['start_ratio']
if 'start_sigma' in kwargs:
start_sigma = kwargs['start_sigma']
if 'window_size' in kwargs:
window_size = kwargs['window_size']
task_info = None
if "task_info" in kwargs:
task_info = kwargs['task_info']
pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = task_info
# set_paramerters()
if change_edge_num:
# edge_num = 3
decision_time_list = []
avatimelist = []
new_decision_time_list,new_avatimelist = generate_randomtimeline(num_edges=max_edge_num,
start_ratio=start_ratio,start_sigma=start_sigma,ava_ratio=avatime_ratio,ratio_sigma=ratio_sigma)
for edge_num in range(3, max_edge_num):
edge_num_time_list = []
# reset ava_time_list
decision_time_list = copy.deepcopy(new_decision_time_list[:edge_num])
avatimelist = copy.deepcopy(new_avatimelist[:edge_num])
# reset random time
random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
# reset W
W = [[12.5 for i in range(edge_num)] for i in range(edge_num)]
# reset edge_computer_capblity
edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
# reset resouce upbound
resouce_upbound = []
for tmpava_bydevice in avatimelist:
tmpsum = 0
for tmpinterval in tmpava_bydevice:
tmplen = tmpinterval[1] - tmpinterval[0]
tmpsum = tmpsum + tmplen
resouce_upbound.append(tmpsum)
set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num, ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,
taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,window_size=window_size,
edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound,decision_time_list=decision_time_list)
edge_num_time_list += get_algorithm_timelist()
for i in range(len(edge_num_time_list)):
# ratio_list[i].append([get_throught_ratio(edge_num_time_list[i],deadline=defalut_deadline)])
time_list[i].append([get_max_time(edge_num_time_list[i])])
else:
edge_num = edge_nums
edge_num_time_list = []
# reset ava_time_list
if 'avatimelist' in kwargs:
avatimelist = kwargs['avatimelist']
if 'decision_time_list' in kwargs:
decision_time_list = kwargs['decision_time_list']
# else:
# avatimelist= [generate_ava_time_and_unava_time(avatime_radio, 20, 300) for i in range(edge_num)]
# reset random time
random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
# reset W
W = [[12.5 for i in range(edge_num)] for i in range(edge_num)]
# reset edge_computer_capblity
edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
# reset resouce upbound
resouce_upbound = []
for tmpava_bydevice in avatimelist:
tmpsum = 0
for tmpinterval in tmpava_bydevice:
tmplen = tmpinterval[1] - tmpinterval[0]
tmpsum = tmpsum + tmplen
resouce_upbound.append(tmpsum)
set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num,window_size=window_size,
ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,decision_time_list=decision_time_list,
taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,
edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound)
# tmptimelist = get_time_list()
edge_num_time_list += get_algorithm_timelist()
for i in range(len(edge_num_time_list)):
# ratio_list[i].append(get_throught_ratio(edge_num_time_list[i],deadline=defalut_deadline))
time_list[i].append(get_max_time(edge_num_time_list[i]))
time_dict = {}
for i in range(len(time_list)):
time_dict[ALGOR_NAME_LIST[i]] = time_list[i]
# ratio_dict = {}
# for i in range(len(ratio_list)):
# ratio_dict[ALGOR_NAME_LIST[i]] = ratio_list[i]
return time_dict
def taskgraph_exp_runtime(data_prefix, taskgraph,**kwargs):
'''
running time exp
* edge_num
* ava_time_list
* random_time
* W
* edge_computer_capbility
* resource_upbound
:return:
'''
# from code02 import set_paramerters,get_time_list
import pandas as pd
avatimelist = []
avatime_ratio = defalut_avaratio
sigma = defalut_sigma
edge_computer_cability = []
resouce_upbound = []
runtime_list = [[] for i in range(len(ALGOR_NAME_LIST))]
max_edge_num = defalut_max_edge_nums
edge_nums = defalut_edge_nums
max_cpu_capbility = defalut_max_cpu_capbility
delta = defalut_delta
window_size = default_timewindow
start_ratio = defalut_start_ratio
start_sigma= defalut_start_sigma
request_number = defalut_request_number
change_edge_num = True
# set big task graph paramerters
pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = get_connect_multiple_task_graph(request_number,taskgraph,tasktype=defalut_tasktype)
# if 'n_look' in kwargs:
# n_look = kwargs['n_look']
if 'max_edge_num' in kwargs:
max_edge_num = kwargs['max_edge_num']
if 'change_edge_num' in kwargs:
change_edge_num = kwargs['change_edge_num']
if 'max_cpu_capbility' in kwargs:
max_cpu_capbility = kwargs['max_cpu_capbility']
if 'decision_time_list' in kwargs:
decision_time_list = kwargs['decision_time_list']
if 'delta' in kwargs:
delta = kwargs['delta']
if 'sigma' in kwargs:
sigma = kwargs['sigma']
if 'window_size' in kwargs:
window_size = kwargs['window_size']
task_info = None
if "task_info" in kwargs:
task_info = kwargs['task_info']
pre,succ,workload,datasize,taskindex2order_map,order2taskindex_map,order2subtaskindex_map = task_info
# set_paramerters()
# edge_num = 3
decision_time_list = []
avatimelist = []
new_decision_time_list,new_avatimelist = generate_randomtimeline(num_edges=max_edge_num,
start_ratio=start_ratio,start_sigma=start_sigma,ava_ratio=avatime_ratio,ratio_sigma=sigma)
for edge_num in range(3, max_edge_num):
edge_num_time_list = []
# reset ava_time_list
decision_time_list = copy.deepcopy(new_decision_time_list[:edge_num])
avatimelist = copy.deepcopy(new_avatimelist[:edge_num])
# reset random time
random_time = [[delta for i in range(len(workload))] for i in range(edge_num)]
# reset W
W = [[100 for i in range(edge_num)] for i in range(edge_num)]
# reset edge_computer_capblity
edge_computer_cability = [max_cpu_capbility for i in range(edge_num)]
# reset resouce upbound
resouce_upbound = []
for tmpava_bydevice in avatimelist:
tmpsum = 0
for tmpinterval in tmpava_bydevice:
tmplen = tmpinterval[1] - tmpinterval[0]
tmpsum = tmpsum + tmplen
resouce_upbound.append(tmpsum)
set_paramerters(workload=workload, datasize=datasize, pre=pre, succ=succ, num_edges=edge_num,window_size=window_size,
ava_time_list=avatimelist, random_time=random_time, bandwidth_edge=W,decision_time_list=decision_time_list,
taskindex2order_map=taskindex2order_map,order2taskindex_map=order2taskindex_map,order2subtaskindex_map=order2subtaskindex_map,
edge_computer_capability=edge_computer_cability, resouce_upbound=resouce_upbound)
# tmptimelist = get_time_list()
edge_num_time_list += get_run_time()
for i in range(len(edge_num_time_list)):
runtime_list[i].append([edge_num_time_list[i]])
runtime_dict = {}
for i in range(len(runtime_list)):
runtime_dict[ALGOR_NAME_LIST[i]] = runtime_list[i]
return runtime_dict
# with the processing capacity of processor
def exp_2_graph(taskgraphtype, expprefix):
import pandas as pd
from tqdm import tqdm
time_dict_list = []
# ratio_dict_list = []
inter_num = defalut_inter_num
new_max_cpu_capbility = 300
# set big task graph paramerters
task_info = get_connect_multiple_task_graph(defalut_request_number,taskgraphtype,tasktype=defalut_tasktype)
for i in tqdm(range(inter_num)):
tmptimedict = {}
# tmpratiodict = {}
for tmpalgorname in ALGOR_NAME_LIST:
tmptimedict[tmpalgorname] = []
# for tmpalgorname in ALGOR_NAME_LIST:
# tmpratiodict[tmpalgorname] = []
decision_time_list,avatimelist = generate_randomtimeline(num_edges=defalut_edge_nums,
start_ratio=defalut_start_ratio,start_sigma=defalut_start_sigma,
ava_ratio=defalut_avaratio,ratio_sigma=defalut_sigma)
for max_cpu_capbility in range(new_max_cpu_capbility, 600,30):
# avatimelist = [generate_ava_time_by_jieduan(0.5, 20, 400, mu=5, sigma=5) for k in range(5)]
# print("avatimelist:")
# print(avatimelist)
# print("decision_time_list:")
# print(decision_time_list)
time_dict = taskgraph_exp("graph_iteration_{0}_{1}".format(i + 1, expprefix), taskgraphtype,
task_info = task_info,
max_edge_num=defalut_edge_nums,
avatimelist=avatimelist,
decision_time_list=decision_time_list,
max_cpu_capbility=max_cpu_capbility,
change_edge_num=False)
for tmpalgorname in ALGOR_NAME_LIST:
tmptimedict[tmpalgorname].append(time_dict[tmpalgorname])
# tmpratiodict[tmpalgorname].append(ratio_dict[tmpalgorname])
time_dict_list.append(tmptimedict)
# ratio_dict_list.append(tmpratiodict)
avg_time_dict = result_timecal(time_dict_list,inter_num=inter_num)
# avg_ratio_dict = result_ratiocal(ratio_dict_list,inter_num=inter_num)
# print(avg_time_dict)
df = | pd.DataFrame(data=avg_time_dict) | pandas.DataFrame |
# coding: utf-8
# # Imported Modules
# In[1]:
import os
import sys
import pandas as pd
import numpy as np
import seaborn as sns
from pandas import DataFrame as df
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import DBSCAN
from sklearn.cluster import AffinityPropagation
from sklearn.model_selection import RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from matplotlib import pyplot as plt
# # All functions
# In[2]:
def run_tSNE(try_id, data):
X_embedded = TSNE(
n_components=2,
init='pca').fit_transform(data)
with open('corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv', 'w') as f:
f.write(',tSNE-1,tSNE-2')
f.write('\n')
for i in range(len(X_embedded)):
f.write(data.index[i] + ',' + str(X_embedded[i][0]) + ',' + str(X_embedded[i][1]) + '\n')
return
def visual_tSNE(try_id, data, label):
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
plt.subplots(figsize=(8, 8))
if label is None:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c='grey', linewidths=0)
else:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c=data[label], linewidths=0,
vmin=-1, vmax=1, cmap=plt.cm.bwr)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
return
def visual_sub_tSNE(try_id, subset, label):
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
coordinates = df(coordinates.loc[subset.index, :])
plt.subplots(figsize=(8, 8))
if label is None:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c='grey', linewidths=0)
else:
plt.scatter(
coordinates['tSNE-1'], coordinates['tSNE-2'],
s=20, c=subset[label], linewidths=0,
vmin=-1, vmax=1, cmap=plt.cm.bwr)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
return
def run_AP(try_id, data):
clustering = AffinityPropagation().fit(data)
label_lncRNAs = df(index=data.index, columns=['label_assigned'])
label_lncRNAs['label_assigned'] = clustering.labels_
label_lncRNAs.to_csv('corr_68sig_linc/tSNE_CR_score/clustering/AP_' + try_id + '.csv', sep=',')
return label_lncRNAs
def run_DBSCAN(try_id, subset, eps, min_samples):
# read in tSNE embedding coordinates
coordinates = pd.read_csv(
'corr_68sig_linc/tSNE_CR_score/tSNE_embedding_' + try_id + '.csv',
header=0,
index_col=0,
sep=',')
    if subset is not None:
coordinates = df(coordinates.loc[subset.index, :])
# scaled to [0, 1]
coordinates['tSNE-1'] = (coordinates['tSNE-1'] - coordinates['tSNE-1'].min()) / (coordinates['tSNE-1'].max() - coordinates['tSNE-1'].min())
coordinates['tSNE-2'] = (coordinates['tSNE-2'] - coordinates['tSNE-2'].min()) / (coordinates['tSNE-2'].max() - coordinates['tSNE-2'].min())
# input hyperparameter
db = DBSCAN(eps=eps, min_samples=min_samples).fit(coordinates)
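    # (eps is therefore expressed in units of the min-max scaled tSNE coordinates above,
    #  i.e. distances within the unit square rather than in the raw embedding space)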
# initial assign
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
label_cell = df(index=coordinates.index, columns=['cluster'])
label_cell['cluster'] = labels
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# visualize
plt.subplots(figsize=(10, 10))
plt.scatter(coordinates['tSNE-1'], coordinates['tSNE-2'], c=label_cell['cluster'], s=20, linewidths=0, cmap='Dark2')
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.axvline(x=coordinates.loc['EPIC1', 'tSNE-1'], ls=':')
plt.axhline(y=coordinates.loc['EPIC1', 'tSNE-2'], ls=':')
plt.show()
print('EPIC1 is in ' + str(label_cell.loc['EPIC1', :]))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(coordinates, labels))
return label_cell
def number_cluster(label_cell):
# show number of genes in each cluster
for c in label_cell['cluster'].unique():
print('cluster ' + str(c))
print(len(label_cell[label_cell['cluster'] == c].index))
return
def report_KNN(results, n_top, try_id):
f = open('corr_68sig_linc/classifier/' + try_id + '_KNN_hyper_parameter_selection.txt', 'w')
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
f.write("Model with rank: {0}".format(i))
f.write('\n')
f.write("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
f.write('\n')
f.write("Parameters: {0}".format(results['params'][candidate]))
f.write('\n')
            f.write("\n")
    f.close()
    return
def hyperpara_KNN(target, training, try_id):
Y = target[target['cluster'] != -1]
X = df(training.loc[Y.index, :])
# select KNN for the following training
clf = KNeighborsClassifier(p=2)
# specify parameters and distributions to sample from
param_dist = {"n_neighbors": np.arange(5, 50, 5),
"leaf_size": np.arange(30, 80, 5),
"weights": ['uniform', 'distance']}
# run randomized search
n_iter_search = 50
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search, cv=5,
random_state=0,
refit=True)
random_search.fit(np.array(X.values), np.ravel(Y.values))
report_KNN(results=random_search.cv_results_, n_top=10, try_id=try_id)
return
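# Illustrative workflow for the two functions above (the variable names used here are
# placeholders, not objects defined in this notebook excerpt): first rank the
# hyper-parameters, then fit the final classifier with the best-ranked settings, e.g.
#   hyperpara_KNN(target=cluster_labels, training=cr_scores, try_id='CR_score')
#   final_KNN(target=cluster_labels, training=cr_scores,
#             n_neighbours=15, leaf_size=30, weights='distance')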
def final_KNN(target, training, n_neighbours, leaf_size, weights):
Y = target[target['cluster'] != -1]
X = df(training.loc[Y.index, :])
# construction of final model
clf = KNeighborsClassifier(
n_neighbors=n_neighbours,
leaf_size=leaf_size,
weights=weights,
p=2)
class_label = Y['cluster'].unique()
class_label.sort()
# evaluate by 5-fold cross-validation
score = cross_val_score(clf, np.array(X.values), np.ravel(Y.values), cv=5)
kf = KFold(n_splits=5)
for train_index, test_index in kf.split(np.array(X.values)):
X_train, X_test = np.array(X.values)[train_index], np.array(X.values)[test_index]
y_train, y_test = np.ravel(Y.values)[train_index], np.ravel(Y.values)[test_index]
clf_fit = clf.fit(X_train, y_train)
y_true = np.ravel(y_test)
y_pred = clf_fit.predict(X_test)
cmatrix = confusion_matrix(y_true, y_pred, labels=class_label)
cmatrix = cmatrix.astype(float) / cmatrix.sum(axis=1)[:, np.newaxis]
cmatrix_frame = df(cmatrix, index=class_label, columns=class_label)
# visualize the confusion matrix
sns.heatmap(cmatrix)
plt.show()
# prediction
    X_pred = df(training.loc[target[target['cluster'] == -1].index, :])
from pandas import read_csv, Series, DataFrame as df
from ast import literal_eval as lev
from numpy import array, ndarray, sqrt
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error
from sklearn.utils import shuffle
from warnings import filterwarnings as fw; fw("ignore")
from seaborn import heatmap as hm
from time import time
from matplotlib.pyplot import figure, xlabel, ylabel, title, savefig, tight_layout
def data_preprocessing(dataset_path, split=True, split_ratio=.85, *args, **kwargs):
dataset = shuffle(read_csv(dataset_path), random_state=43)
dataset["snps"] = [lev(i) for i in dataset["snps"]]
for i in dataset.index:
dataset.snps[i].append(int(dataset.location[i]))
dataset.snps[i].append(int(dataset.sample_id[i]))
X, y = dataset['snps'], dataset['rice_yield']
if split:
split = int(len(X)*split_ratio)
X_train, X_test = X[:split], X[split:]
X_train, X_test = array(X_train.tolist()), array(X_test.tolist())
y_train, y_test = y[:split], y[split:]
else:
X_train, X_test = array(X.tolist()), 0
y_train, y_test = y, 0
return dict({
"X_train" : X_train,
"X_test" : X_test,
"y_train" : y_train,
"y_test" : y_test
})
def elasticNet(data, a=.1, ratio=.5, intercept=True, coef=True, *args, **kwargs):
# Fit the Elastic Net model
reg_en = ElasticNet(alpha=a, l1_ratio=ratio, random_state=43).fit(data["X_train"], data["y_train"])
intercept_ = reg_en.intercept_ if intercept==True else None
coef_ = reg_en.coef_ if coef==True else None
# Prediction
if isinstance(data["X_test"], ndarray) and isinstance(data["y_test"], Series):
y_predict = reg_en.predict(data["X_test"])
residual = [y-y_hat for (y, y_hat) in zip(data["y_test"], y_predict)]
mse_test = mean_squared_error(data["y_test"], y_predict)
mbe_test = sum(residual) / len(data["y_test"])
msle_test = mean_squared_log_error(data["y_test"], [0 if i < 0 else i for i in y_predict])
mae_test = mean_absolute_error(data["y_test"], y_predict)
smape_test = 1 / len(data["y_test"]) * sum(list(map(lambda x, y: x/y, [abs(i) for i in residual], [(y+y_hat)/2 for (y, y_hat) in zip(data["y_test"], y_predict)])))
    else:
        # No held-out test split: return only the fitted coefficients
        return dict({"coef": coef_}), (None, None)
return dict({
"coef" : coef_,
"MSE" : round(mse_test, 5),
"RMSE" : round(sqrt(mse_test), 5),
"MBE" : round(mbe_test, 5),
"MAE" : round(mae_test, 5),
"MSLE" : round(msle_test, 5),
"SMAPE" : round(smape_test, 5)
}), (data["y_test"].tolist(), y_predict.tolist())
score, res = elasticNet(data_preprocessing("./data/gp_table.csv"), a=.4, ratio=.05)
score, _ = elasticNet(data_preprocessing("data/gp_table_significant_snps_exp1.csv"), a=.05, ratio=.95)
score, _ = elasticNet(data_preprocessing("data/gp_table_significant_snps_exp2.csv"), a=.05, ratio=.95)
score, _ = elasticNet(data_preprocessing("data/gp_table_significant_snps_exp3.csv"), a=.05, ratio=.95)
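# NOTE (assumption): the grids below are not defined anywhere in this excerpt; these are
# illustrative values for the alpha / l1_ratio sweep performed by the loop that follows.
alpha_constant_values = [0.05, 0.1, 0.2, 0.4, 0.8]
l1_ratio_values = [0.05, 0.25, 0.5, 0.75, 0.95]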
mse, rmse, mbe, mae, msle, smape = {}, {}, {}, {}, {}, {}
start_time = time()
for l1_ratio in l1_ratio_values:
print("- Ratio {}".format(l1_ratio))
mse_list, rmse_list, mbe_list, mae_list, msle_list, smape_list = [], [], [], [], [], []
for alpha in alpha_constant_values:
        result, _ = elasticNet(data_preprocessing("./data/gp_table.csv"), a=alpha, ratio=l1_ratio)
mse_list.append(result["MSE"])
rmse_list.append(result["RMSE"])
mbe_list.append(result["MBE"])
mae_list.append(result["MAE"])
msle_list.append(result["MSLE"])
smape_list.append(result["SMAPE"])
print("\tAlpha {} - MSE: {:.5f} | RMSE: {:.5f} | MBE: {:.5f} | MAE: {:.5f} | MSLE: {:.5f} | SMAPE: {:.5f}".format(
alpha, result["MSE"], result["RMSE"], result["MBE"], result["MAE"], result["MSLE"], result["SMAPE"]))
mse[l1_ratio] = mse_list
rmse[l1_ratio] = rmse_list
mbe[l1_ratio] = mbe_list
mae[l1_ratio] = mae_list
msle[l1_ratio] = msle_list
smape[l1_ratio] = smape_list
print("=" * 25)
print("Total exc time: {:.3} s".format(time()-start_time))
print("=" * 25)
# Save as DataFrame
df(mse, index=alpha_constant_values)
from inspect import isclass
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Datetime
import featuretools as ft
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator
)
from featuretools.primitives import (
Absolute,
AddNumeric,
AddNumericScalar,
Age,
Count,
Day,
Diff,
DivideByFeature,
DivideNumeric,
DivideNumericScalar,
Equal,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
Haversine,
Hour,
IsIn,
IsNull,
Latitude,
LessThanEqualToScalar,
LessThanScalar,
Longitude,
Minute,
Mode,
Month,
MultiplyNumeric,
MultiplyNumericScalar,
Not,
NotEqual,
NotEqualScalar,
NumCharacters,
NumWords,
Percentile,
ScalarSubtractNumericFeature,
Second,
SubtractNumeric,
SubtractNumericScalar,
Sum,
TimeSince,
TransformPrimitive,
Year,
get_transform_primitives
)
from featuretools.primitives.base import make_trans_primitive
from featuretools.primitives.utils import (
PrimitivesDeserializer,
serialize_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.tests.testing_utils import feature_with_name, to_pandas
from featuretools.utils.gen_utils import Library
from featuretools.utils.koalas_utils import pd_to_ks_clean
def test_init_and_name(es):
log = es['log']
rating = ft.Feature(ft.IdentityFeature(es["products"].ww["rating"]), "log")
log_features = [ft.Feature(es['log'].ww[col]) for col in log.columns] +\
[ft.Feature(rating, primitive=GreaterThanScalar(2.5)),
ft.Feature(rating, primitive=GreaterThanScalar(3.5))]
# Add Timedelta feature
# features.append(pd.Timestamp.now() - ft.Feature(log['datetime']))
customers_features = [ft.Feature(es["customers"].ww[col]) for col in es["customers"].columns]
# check all transform primitives have a name
for attribute_string in dir(ft.primitives):
attr = getattr(ft.primitives, attribute_string)
if isclass(attr):
if issubclass(attr, TransformPrimitive) and attr != TransformPrimitive:
assert getattr(attr, "name") is not None
trans_primitives = get_transform_primitives().values()
# If Dask EntitySet use only Dask compatible primitives
if es.dataframe_type == Library.DASK.value:
trans_primitives = [prim for prim in trans_primitives if Library.DASK in prim.compatibility]
if es.dataframe_type == Library.KOALAS.value:
trans_primitives = [prim for prim in trans_primitives if Library.KOALAS in prim.compatibility]
for transform_prim in trans_primitives:
# skip automated testing if a few special cases
features_to_use = log_features
if transform_prim in [NotEqual, Equal]:
continue
if transform_prim in [Age]:
features_to_use = customers_features
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features_to_use)
else:
matching_inputs = match(input_types, features_to_use)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for prim in matching_inputs:
instance = ft.Feature(prim, primitive=transform_prim)
# try to get name and calculate
instance.get_name()
ft.calculate_feature_matrix([instance], entityset=es)
def test_relationship_path(es):
f = ft.TransformFeature(ft.Feature(es['log'].ww['datetime']), Hour)
assert len(f.relationship_path) == 0
def test_serialization(es):
value = ft.IdentityFeature(es['log'].ww['value'])
primitive = ft.primitives.MultiplyNumericScalar(value=2)
value_x2 = ft.TransformFeature(value, primitive)
dictionary = {
'name': None,
'base_features': [value.unique_name()],
'primitive': serialize_primitive(primitive),
}
assert dictionary == value_x2.get_arguments()
assert value_x2 == \
ft.TransformFeature.from_dictionary(dictionary, es,
{value.unique_name(): value},
PrimitivesDeserializer())
def test_make_trans_feat(es):
f = ft.Feature(es['log'].ww['datetime'], primitive=Hour)
feature_set = FeatureSet([f])
calculator = FeatureSetCalculator(es, feature_set=feature_set)
df = to_pandas(calculator.run(np.array([0])))
v = df[f.get_name()][0]
assert v == 10
@pytest.fixture
def pd_simple_es():
df = pd.DataFrame({
'id': range(4),
'value': pd.Categorical(['a', 'c', 'b', 'd']),
'value2': pd.Categorical(['a', 'b', 'a', 'd']),
'object': ['time1', 'time2', 'time3', 'time4'],
'datetime': pd.Series([pd.Timestamp('2001-01-01'),
pd.Timestamp('2001-01-02'),
pd.Timestamp('2001-01-03'),
pd.Timestamp('2001-01-04')])
})
es = ft.EntitySet('equal_test')
es.add_dataframe(dataframe_name='values', dataframe=df, index='id')
return es
@pytest.fixture
def dd_simple_es(pd_simple_es):
dataframes = {}
for df in pd_simple_es.dataframes:
dataframes[df.ww.name] = (dd.from_pandas(df.reset_index(drop=True), npartitions=4),
df.ww.index,
None,
df.ww.logical_types)
relationships = [(rel.parent_name,
rel._parent_column_name,
rel.child_name,
rel._child_column_name) for rel in pd_simple_es.relationships]
return ft.EntitySet(id=pd_simple_es.id, dataframes=dataframes, relationships=relationships)
@pytest.fixture
def ks_simple_es(pd_simple_es):
ks = pytest.importorskip('databricks.koalas', reason="Koalas not installed, skipping")
dataframes = {}
for df in pd_simple_es.dataframes:
cleaned_df = pd_to_ks_clean(df).reset_index(drop=True)
dataframes[df.ww.name] = (ks.from_pandas(cleaned_df),
df.ww.index,
None,
df.ww.logical_types)
relationships = [(rel.parent_name,
rel._parent_column_name,
rel.child_name,
rel._child_column_name) for rel in pd_simple_es.relationships]
return ft.EntitySet(id=pd_simple_es.id, dataframes=dataframes, relationships=relationships)
@pytest.fixture(params=['pd_simple_es', 'dd_simple_es', 'ks_simple_es'])
def simple_es(request):
return request.getfixturevalue(request.param)
def test_equal_categorical(simple_es):
f1 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['value']),
ft.IdentityFeature(simple_es['values'].ww['value2'])],
primitive=Equal)
df = ft.calculate_feature_matrix(entityset=simple_es, features=[f1])
if simple_es.dataframe_type != Library.KOALAS.value:
# Koalas does not support categorical dtype
assert set(simple_es['values']['value'].cat.categories) != \
set(simple_es['values']['value2'].cat.categories)
assert to_pandas(df, index='id', sort_index=True)['value = value2'].to_list() == [True, False, False, True]
def test_equal_different_dtypes(simple_es):
f1 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['object']),
ft.IdentityFeature(simple_es['values'].ww['datetime'])],
primitive=Equal)
f2 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['datetime']),
ft.IdentityFeature(simple_es['values'].ww['object'])],
primitive=Equal)
# verify that equals works for different dtypes regardless of order
df = ft.calculate_feature_matrix(entityset=simple_es, features=[f1, f2])
assert to_pandas(df, index='id', sort_index=True)['object = datetime'].to_list() == [False, False, False, False]
assert to_pandas(df, index='id', sort_index=True)['datetime = object'].to_list() == [False, False, False, False]
def test_not_equal_categorical(simple_es):
f1 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['value']),
ft.IdentityFeature(simple_es['values'].ww['value2'])],
primitive=NotEqual)
df = ft.calculate_feature_matrix(entityset=simple_es, features=[f1])
if simple_es.dataframe_type != Library.KOALAS.value:
# Koalas does not support categorical dtype
assert set(simple_es['values']['value'].cat.categories) != \
set(simple_es['values']['value2'].cat.categories)
assert to_pandas(df, index='id', sort_index=True)['value != value2'].to_list() == [False, True, True, False]
def test_not_equal_different_dtypes(simple_es):
f1 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['object']),
ft.IdentityFeature(simple_es['values'].ww['datetime'])],
primitive=NotEqual)
f2 = ft.Feature([ft.IdentityFeature(simple_es['values'].ww['datetime']),
ft.IdentityFeature(simple_es['values'].ww['object'])],
primitive=NotEqual)
# verify that equals works for different dtypes regardless of order
df = ft.calculate_feature_matrix(entityset=simple_es, features=[f1, f2])
assert to_pandas(df, index='id', sort_index=True)['object != datetime'].to_list() == [True, True, True, True]
assert to_pandas(df, index='id', sort_index=True)['datetime != object'].to_list() == [True, True, True, True]
def test_diff(pd_es):
value = ft.Feature(pd_es['log'].ww['value'])
customer_id_feat = ft.Feature(pd_es['sessions'].ww['customer_id'], 'log')
diff1 = ft.Feature(value, groupby=ft.Feature(pd_es['log'].ww['session_id']), primitive=Diff)
diff2 = ft.Feature(value, groupby=customer_id_feat, primitive=Diff)
feature_set = FeatureSet([diff1, diff2])
calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
df = calculator.run(np.array(range(15)))
val1 = df[diff1.get_name()].tolist()
val2 = df[diff2.get_name()].tolist()
correct_vals1 = [
np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7
]
correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]
for i, v in enumerate(val1):
v1 = val1[i]
if np.isnan(v1):
assert (np.isnan(correct_vals1[i]))
else:
assert v1 == correct_vals1[i]
v2 = val2[i]
if np.isnan(v2):
assert (np.isnan(correct_vals2[i]))
else:
assert v2 == correct_vals2[i]
def test_diff_single_value(pd_es):
diff = ft.Feature(pd_es['stores'].ww['num_square_feet'], groupby=ft.Feature(pd_es['stores'].ww[u'région_id']), primitive=Diff)
feature_set = FeatureSet([diff])
calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
df = calculator.run(np.array([4]))
assert df[diff.get_name()][4] == 6000.0
def test_diff_reordered(pd_es):
sum_feat = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Sum)
diff = ft.Feature(sum_feat, primitive=Diff)
feature_set = FeatureSet([diff])
calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
df = calculator.run(np.array([4, 2]))
assert df[diff.get_name()][4] == 16
assert df[diff.get_name()][2] == -6
def test_diff_single_value_is_nan(pd_es):
diff = ft.Feature(pd_es['stores'].ww['num_square_feet'], groupby=ft.Feature(pd_es['stores'].ww[u'région_id']), primitive=Diff)
feature_set = FeatureSet([diff])
calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
df = calculator.run(np.array([5]))
assert df.shape[0] == 1
assert df[diff.get_name()].dropna().shape[0] == 0
def test_compare_of_identity(es):
to_test = [(EqualScalar, [False, False, True, False]),
(NotEqualScalar, [True, True, False, True]),
(LessThanScalar, [True, True, False, False]),
(LessThanEqualToScalar, [True, True, True, False]),
(GreaterThanScalar, [False, False, False, True]),
(GreaterThanEqualToScalar, [False, False, True, True])]
features = []
for test in to_test:
features.append(ft.Feature(es['log'].ww['value'], primitive=test[0](10)))
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3]),
index='id',
sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_compare_of_direct(es):
log_rating = ft.Feature(es['products'].ww['rating'], 'log')
to_test = [(EqualScalar, [False, False, False, False]),
(NotEqualScalar, [True, True, True, True]),
(LessThanScalar, [False, False, False, True]),
(LessThanEqualToScalar, [False, False, False, True]),
(GreaterThanScalar, [True, True, True, False]),
(GreaterThanEqualToScalar, [True, True, True, False])]
features = []
for test in to_test:
features.append(ft.Feature(log_rating, primitive=test[0](4.5)))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_compare_of_transform(es):
day = ft.Feature(es['log'].ww['datetime'], primitive=Day)
to_test = [(EqualScalar, [False, True]),
(NotEqualScalar, [True, False]),
(LessThanScalar, [True, False]),
(LessThanEqualToScalar, [True, True]),
(GreaterThanScalar, [False, False]),
(GreaterThanEqualToScalar, [False, True])]
features = []
for test in to_test:
features.append(ft.Feature(day, primitive=test[0](10)))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 14])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_compare_of_agg(es):
count_logs = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)
to_test = [(EqualScalar, [False, False, False, True]),
(NotEqualScalar, [True, True, True, False]),
(LessThanScalar, [False, False, True, False]),
(LessThanEqualToScalar, [False, False, True, True]),
(GreaterThanScalar, [True, True, False, False]),
(GreaterThanEqualToScalar, [True, True, False, True])]
features = []
for test in to_test:
features.append(ft.Feature(count_logs, primitive=test[0](2)))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_compare_all_nans(es):
if es.dataframe_type != Library.PANDAS.value:
nan_feat = ft.Feature(es['log'].ww['value'], parent_dataframe_name='sessions', primitive=ft.primitives.Min)
compare = nan_feat == 0.0
else:
nan_feat = ft.Feature(es['log'].ww['product_id'], parent_dataframe_name='sessions', primitive=Mode)
compare = nan_feat == 'brown bag'
# before all data
time_last = pd.Timestamp('1/1/1993')
df = ft.calculate_feature_matrix(entityset=es, features=[nan_feat, compare], instance_ids=[0, 1, 2], cutoff_time=time_last)
df = to_pandas(df, index='id', sort_index=True)
assert df[nan_feat.get_name()].dropna().shape[0] == 0
assert not df[compare.get_name()].any()
def test_arithmetic_of_val(es):
to_test = [(AddNumericScalar, [2.0, 7.0, 12.0, 17.0]),
(SubtractNumericScalar, [-2.0, 3.0, 8.0, 13.0]),
(ScalarSubtractNumericFeature, [2.0, -3.0, -8.0, -13.0]),
(MultiplyNumericScalar, [0, 10, 20, 30]),
(DivideNumericScalar, [0, 2.5, 5, 7.5]),
(DivideByFeature, [np.inf, 0.4, 0.2, 2 / 15.0])]
features = []
for test in to_test:
features.append(ft.Feature(es['log'].ww['value'], primitive=test[0](2)))
features.append(ft.Feature(es['log'].ww['value']) / 0)
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for f, test in zip(features, to_test):
v = df[f.get_name()].tolist()
assert v == test[1]
test = [np.nan, np.inf, np.inf, np.inf]
v = df[features[-1].get_name()].tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1:]
def test_arithmetic_two_vals_fails(es):
error_text = "Not a feature"
with pytest.raises(Exception, match=error_text):
ft.Feature([2, 2], primitive=AddNumeric)
def test_arithmetic_of_identity(es):
to_test = [(AddNumeric, [0., 7., 14., 21.]),
(SubtractNumeric, [0, 3, 6, 9]),
(MultiplyNumeric, [0, 10, 40, 90]),
(DivideNumeric, [np.nan, 2.5, 2.5, 2.5])]
# SubtractNumeric not supported for Koalas EntitySets
if es.dataframe_type == Library.KOALAS.value:
to_test = to_test[:1] + to_test[2:]
features = []
for test in to_test:
features.append(ft.Feature([ft.Feature(es['log'].ww['value']), ft.Feature(es['log'].ww['value_2'])], primitive=test[0]))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test[:-1]):
v = df[features[i].get_name()].tolist()
assert v == test[1]
i, test = -1, to_test[-1]
v = df[features[i].get_name()].tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
rating = ft.Feature(es['products'].ww['rating'])
log_rating = ft.Feature(rating, 'log')
customer_age = ft.Feature(es['customers'].ww['age'])
session_age = ft.Feature(customer_age, 'sessions')
log_age = ft.Feature(session_age, 'log')
to_test = [(AddNumeric, [38, 37, 37.5, 37.5]),
(SubtractNumeric, [28, 29, 28.5, 28.5]),
(MultiplyNumeric, [165, 132, 148.5, 148.5]),
(DivideNumeric, [6.6, 8.25, 22. / 3, 22. / 3])]
if es.dataframe_type == Library.KOALAS.value:
to_test = to_test[:1] + to_test[2:]
features = []
for test in to_test:
features.append(ft.Feature([log_age, log_rating], primitive=test[0]))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 3, 5, 7])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
# Koalas EntitySets do not support boolean multiplication
@pytest.fixture(params=['pd_boolean_mult_es', 'dask_boolean_mult_es'])
def boolean_mult_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_boolean_mult_es():
es = ft.EntitySet()
df = pd.DataFrame({"index": [0, 1, 2],
"bool": pd.Series([True, False, True]),
"numeric": [2, 3, np.nan]})
es.add_dataframe(dataframe_name="test",
dataframe=df,
index="index")
return es
@pytest.fixture
def dask_boolean_mult_es(pd_boolean_mult_es):
dataframes = {}
for df in pd_boolean_mult_es.dataframes:
dataframes[df.ww.name] = (dd.from_pandas(df, npartitions=2), df.ww.index, None, df.ww.logical_types)
return ft.EntitySet(id=pd_boolean_mult_es.id, dataframes=dataframes)
def test_boolean_multiply(boolean_mult_es):
es = boolean_mult_es
to_test = [
('numeric', 'numeric'),
('numeric', 'bool'),
('bool', 'numeric'),
('bool', 'bool')
]
features = []
for row in to_test:
features.append(ft.Feature(es["test"].ww[row[0]]) * ft.Feature(es["test"].ww[row[1]]))
fm = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features))
df = to_pandas(es['test'])
for row in to_test:
col_name = '{} * {}'.format(row[0], row[1])
if row[0] == 'bool' and row[1] == 'bool':
assert fm[col_name].equals((df[row[0]] & df[row[1]]).astype('boolean'))
else:
assert fm[col_name].equals(df[row[0]] * df[row[1]])
# TODO: rework test to be Dask and Koalas compatible
def test_arithmetic_of_transform(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail("Test uses Diff which is not supported in Dask or Koalas")
diff1 = ft.Feature([ft.Feature(es['log'].ww['value'])], primitive=Diff)
diff2 = ft.Feature([ft.Feature(es['log'].ww['value_2'])], primitive=Diff)
to_test = [(AddNumeric, [np.nan, 7., -7., 10.]),
(SubtractNumeric, [np.nan, 3., -3., 4.]),
(MultiplyNumeric, [np.nan, 10., 10., 21.]),
(DivideNumeric, [np.nan, 2.5, 2.5, 2.3333333333333335])]
features = []
for test in to_test:
features.append(ft.Feature([diff1, diff2], primitive=test[0]()))
feature_set = FeatureSet(features)
calculator = FeatureSetCalculator(es, feature_set=feature_set)
df = calculator.run(np.array([0, 2, 12, 13]))
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert np.isnan(v.pop(0))
assert np.isnan(test[1].pop(0))
assert v == test[1]
def test_not_feature(es):
not_feat = ft.Feature(es['customers'].ww['loves_ice_cream'], primitive=Not)
features = [not_feat]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1]))
v = df[not_feat.get_name()].values
assert not v[0]
assert v[1]
def test_arithmetic_of_agg(es):
customer_id_feat = ft.Feature(es['customers'].ww['id'])
store_id_feat = ft.Feature(es['stores'].ww['id'])
count_customer = ft.Feature(customer_id_feat, parent_dataframe_name=u'régions', primitive=Count)
count_stores = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)
to_test = [(AddNumeric, [6, 2]),
(SubtractNumeric, [0, -2]),
(MultiplyNumeric, [9, 0]),
(DivideNumeric, [1, 0])]
# Skip SubtractNumeric for Koalas as it's unsupported
if es.dataframe_type == Library.KOALAS.value:
to_test = to_test[:1] + to_test[2:]
features = []
for test in to_test:
features.append(ft.Feature([count_customer, count_stores], primitive=test[0]()))
ids = ['United States', 'Mexico']
df = ft.calculate_feature_matrix(entityset=es, features=features,
instance_ids=ids)
df = to_pandas(df, index='id', sort_index=True)
df = df.loc[ids]
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_latlong(pd_es):
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
latitude = ft.Feature(log_latlong_feat, primitive=Latitude)
longitude = ft.Feature(log_latlong_feat, primitive=Longitude)
features = [latitude, longitude]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features, instance_ids=range(15))
latvalues = df[latitude.get_name()].values
lonvalues = df[longitude.get_name()].values
assert len(latvalues) == 15
assert len(lonvalues) == 15
real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
for i, v, in enumerate(real_lats):
assert v == latvalues[i]
for i, v, in enumerate(real_lons):
assert v == lonvalues[i]
def test_latlong_with_nan(pd_es):
df = pd_es['log']
df['latlong'][0] = np.nan
df['latlong'][1] = (10, np.nan)
df['latlong'][2] = (np.nan, 4)
df['latlong'][3] = (np.nan, np.nan)
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
latitude = ft.Feature(log_latlong_feat, primitive=Latitude)
longitude = ft.Feature(log_latlong_feat, primitive=Longitude)
features = [latitude, longitude]
fm = ft.calculate_feature_matrix(entityset=pd_es, features=features)
latvalues = fm[latitude.get_name()].values
lonvalues = fm[longitude.get_name()].values
assert len(latvalues) == 17
assert len(lonvalues) == 17
real_lats = [np.nan, 10, np.nan, np.nan, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14, np.nan, np.nan]
real_lons = [np.nan, np.nan, 4, np.nan, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6, np.nan, np.nan]
assert np.allclose(latvalues, real_lats, atol=0.0001, equal_nan=True)
assert np.allclose(lonvalues, real_lons, atol=0.0001, equal_nan=True)
def test_haversine(pd_es):
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features,
instance_ids=range(15))
values = df[haversine.get_name()].values
real = [0, 525.318462, 1045.32190304, 1554.56176802, 2047.3294327, 0,
138.16578931, 276.20524822, 413.99185444, 0, 0, 525.318462, 0,
741.57941183, 1467.52760175]
assert len(values) == 15
assert np.allclose(values, real, atol=0.0001)
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine(unit='kilometers'))
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features,
instance_ids=range(15))
values = df[haversine.get_name()].values
real_km = [0, 845.41812212, 1682.2825471, 2501.82467535, 3294.85736668,
0, 222.35628593, 444.50926278, 666.25531268, 0, 0,
845.41812212, 0, 1193.45638714, 2361.75676089]
assert len(values) == 15
assert np.allclose(values, real_km, atol=0.0001)
error_text = "Invalid unit inches provided. Must be one of"
with pytest.raises(ValueError, match=error_text):
Haversine(unit='inches')
def test_haversine_with_nan(pd_es):
# Check some `nan` values
df = pd_es['log']
df['latlong'][0] = np.nan
df['latlong'][1] = (10, np.nan)
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features)
values = df[haversine.get_name()].values
real = [np.nan, np.nan, 1045.32190304, 1554.56176802, 2047.3294327, 0,
138.16578931, 276.20524822, 413.99185444, 0, 0, 525.318462, 0,
741.57941183, 1467.52760175, np.nan, np.nan]
assert np.allclose(values, real, atol=0.0001, equal_nan=True)
# Check all `nan` values
df = pd_es['log']
df['latlong2'] = np.nan
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features)
values = df[haversine.get_name()].values
real = [np.nan] * pd_es['log'].shape[0]
assert np.allclose(values, real, atol=0.0001, equal_nan=True)
def test_text_primitives(es):
words = ft.Feature(es['log'].ww['comments'], primitive=NumWords)
chars = ft.Feature(es['log'].ww['comments'], primitive=NumCharacters)
features = [words, chars]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15)),
index='id',
sort_index=True)
word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,
240, 1239, 3, 3, 3, 3]
char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,
437, 1325, 6322, 10, 10, 10, 10]
word_values = df[words.get_name()].values
char_values = df[chars.get_name()].values
assert len(word_values) == 15
for i, v in enumerate(word_values):
assert v == word_counts[i]
for i, v in enumerate(char_values):
assert v == char_counts[i]
def test_isin_feat(es):
isin = ft.Feature(es['log'].ww['product_id'], primitive=IsIn(list_of_outputs=["toothpaste", "coke zero"]))
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_other_syntax(es):
isin = ft.Feature(es['log'].ww['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_other_syntax_int(es):
isin = ft.Feature(es['log'].ww['value']).isin([5, 10])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_custom(es):
def pd_is_in(array, list_of_outputs=None):
if list_of_outputs is None:
list_of_outputs = []
return array.isin(list_of_outputs)
def isin_generate_name(self, base_feature_names):
return u"%s.isin(%s)" % (base_feature_names[0],
str(self.kwargs['list_of_outputs']))
IsIn = make_trans_primitive(
pd_is_in,
[ColumnSchema()],
ColumnSchema(logical_type=Boolean),
name="is_in",
description="For each value of the base feature, checks whether it is "
"in a list that is provided.",
cls_attributes={"generate_name": isin_generate_name})
isin = ft.Feature(es['log'].ww['product_id'], primitive=IsIn(list_of_outputs=["toothpaste", "coke zero"]))
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
isin = ft.Feature(es['log'].ww['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
isin = ft.Feature(es['log'].ww['value']).isin([5, 10])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].tolist()
assert true == v
def test_isnull_feat(pd_es):
value = ft.Feature(pd_es['log'].ww['value'])
diff = ft.Feature(value, groupby=ft.Feature(pd_es['log'].ww['session_id']), primitive=Diff)
isnull = ft.Feature(diff, primitive=IsNull)
features = [isnull]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features, instance_ids=range(15))
correct_vals = [True, False, False, False, False, True, False, False,
False, True, True, False, True, False, False]
values = df[isnull.get_name()].tolist()
assert correct_vals == values
def test_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
feature_set = FeatureSet([p])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array(range(10, 17)))
true = pd_es['log'][v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
p2 = ft.Feature(p - 1, primitive=Percentile)
feature_set = FeatureSet([p, p2])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array(range(10, 17)))
true = pd_es['log'][v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_agg_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
agg = ft.Feature(p, parent_dataframe_name='sessions', primitive=Sum)
feature_set = FeatureSet([agg])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
log_vals = pd_es['log'][[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum()[[0, 1]]
for t, a in zip(true_p.values, df[agg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
agg = ft.Feature(p, parent_dataframe_name='sessions', primitive=Sum)
pagg = ft.Feature(agg, primitive=Percentile)
feature_set = FeatureSet([pagg])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
log_vals = pd_es['log'][[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
agg = ft.Feature(v, parent_dataframe_name='sessions', primitive=Sum)
pagg = ft.Feature(agg, primitive=Percentile)
feature_set = FeatureSet([pagg])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
log_vals = pd_es['log'][[v.get_name(), 'session_id']]
true_p = log_vals.groupby('session_id')[v.get_name()].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_percentile(pd_es):
v = ft.Feature(pd_es['customers'].ww['age'])
p = ft.Feature(v, primitive=Percentile)
d = ft.Feature(p, 'sessions')
feature_set = FeatureSet([d])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
cust_vals = pd_es['customers'][[v.get_name()]]
cust_vals['percentile'] = cust_vals[v.get_name()].rank(pct=True)
true_p = cust_vals['percentile'].loc[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_agg_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
agg = ft.Feature(p, parent_dataframe_name='customers', primitive=Sum)
d = ft.Feature(agg, 'sessions')
feature_set = FeatureSet([d])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
log_vals = pd_es['log'][[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
log_vals['customer_id'] = [0] * 10 + [1] * 5 + [2] * 2
true_p = log_vals.groupby('customer_id')['percentile'].sum().fillna(0)
true_p = true_p[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or round(t, 3) == round(a, 3)
def test_percentile_with_cutoff(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
feature_set = FeatureSet([p])
calculator = FeatureSetCalculator(pd_es, feature_set, pd.Timestamp('2011/04/09 10:30:13'))
df = calculator.run(np.array([2]))
assert df[p.get_name()].tolist()[0] == 1.0
def test_two_kinds_of_dependents(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
product = ft.Feature(pd_es['log'].ww['product_id'])
agg = ft.Feature(v, parent_dataframe_name='customers', where=product == 'coke zero', primitive=Sum)
p = ft.Feature(agg, primitive=Percentile)
g = ft.Feature(agg, primitive=Absolute)
agg2 = ft.Feature(v, parent_dataframe_name='sessions', where=product == 'coke zero', primitive=Sum)
agg3 = ft.Feature(agg2, parent_dataframe_name='customers', primitive=Sum)
feature_set = FeatureSet([p, g, agg3])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
assert df[p.get_name()].tolist() == [2. / 3, 1.0]
assert df[g.get_name()].tolist() == [15, 26]
def test_make_transform_multiple_output_features(pd_es):
def test_time(x):
times = pd.Series(x)
units = ["year", "month", "day", "hour", "minute", "second"]
return [times.apply(lambda x: getattr(x, unit)) for unit in units]
def gen_feat_names(self):
subnames = ["Year", "Month", "Day", "Hour", "Minute", "Second"]
return ["Now.%s(%s)" % (subname, self.base_features[0].get_name())
for subname in subnames]
TestTime = make_trans_primitive(
function=test_time,
input_types=[ColumnSchema(logical_type=Datetime)],
return_type=ColumnSchema(semantic_tags={'numeric'}),
number_output_features=6,
cls_attributes={"get_feature_names": gen_feat_names},
)
join_time_split = ft.Feature(pd_es["log"].ww["datetime"], primitive=TestTime)
alt_features = [ft.Feature(pd_es["log"].ww["datetime"], primitive=Year),
ft.Feature(pd_es["log"].ww["datetime"], primitive=Month),
ft.Feature(pd_es["log"].ww["datetime"], primitive=Day),
ft.Feature(pd_es["log"].ww["datetime"], primitive=Hour),
ft.Feature(pd_es["log"].ww["datetime"], primitive=Minute),
ft.Feature(pd_es["log"].ww["datetime"], primitive=Second)]
fm, fl = ft.dfs(
entityset=pd_es,
target_dataframe_name="log",
agg_primitives=['sum'],
trans_primitives=[TestTime, Year, Month, Day, Hour, Minute, Second, Diff],
max_depth=5)
subnames = join_time_split.get_feature_names()
altnames = [f.get_name() for f in alt_features]
for col1, col2 in zip(subnames, altnames):
assert (fm[col1] == fm[col2]).all()
for i in range(6):
f = 'sessions.customers.SUM(log.TEST_TIME(datetime)[%d])' % i
assert feature_with_name(fl, f)
assert ('products.DIFF(SUM(log.TEST_TIME(datetime)[%d]))' % i) in fl
def test_feature_names_inherit_from_make_trans_primitive():
# R TODO
pass
def test_get_filepath(es):
class Mod4(TransformPrimitive):
'''Return base feature modulo 4'''
name = "mod4"
input_types = [ColumnSchema(semantic_tags={'numeric'})]
return_type = ColumnSchema(semantic_tags={'numeric'})
compatibility = [Library.PANDAS, Library.DASK, Library.KOALAS]
def get_function(self):
filepath = self.get_filepath("featuretools_unit_test_example.csv")
reference = pd.read_csv(filepath, header=None, squeeze=True)
def map_to_word(x):
def _map(x):
if pd.isnull(x):
return x
return reference[int(x) % 4]
return x.apply(_map)
return map_to_word
feat = ft.Feature(es['log'].ww['value'], primitive=Mod4)
df = ft.calculate_feature_matrix(features=[feat],
entityset=es,
instance_ids=range(17))
df = to_pandas(df, index='id')
assert pd.isnull(df["MOD4(value)"][15])
assert df["MOD4(value)"][0] == 0
assert df["MOD4(value)"][14] == 2
fm, fl = ft.dfs(entityset=es,
target_dataframe_name="log",
agg_primitives=[],
trans_primitives=[Mod4])
fm = to_pandas(fm, index='id')
assert fm["MOD4(value)"][0] == 0
assert fm["MOD4(value)"][14] == 2
    assert pd.isnull(fm["MOD4(value)"][15])
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 08:14:54 2020
@author: Tom
"""
import ecm
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import StandardScaler
import scipy
import pandas as pd
from matplotlib import cm
import configparser
# Turn off code warnings (this is not recommended for routine use)
import warnings
warnings.filterwarnings("ignore")
root = 'D:\\pybamm_pnm_results\\Chen2020_v3'
save_im_path = 'D:\\pybamm_pnm_results\\figures'
exp_root = 'D:\\pybamm_pnm_results\\experimental'
exp_files = ['MJ1_0.5C.csv',
'MJ1_1.0C.csv',
'MJ1_1.5C.csv']
base = 'pybamm_pnm_case'
plt.close('all')
savefigs = False
tab_1 = [0, 1, 2, 3, 4]
tab_2 = [5, 6, 7, 8, 9]
tab_5 = [10, 11, 12, 13, 14]
tab_2_third = [15, 16, 17, 18, 19]
tab_1_2 = [20, 21, 22, 23, 24]
amps = ecm.get_amp_cases()
d = ecm.load_all_data()
cases = ecm.get_cases()
#soc_list=[[0.9, 0.8, 0.7],[0.6, 0.5, 0.4],[0.3, 0.2, 0.1]]
#mini_soc_list=[[0.99, 0.98, 0.97],[0.96, 0.95, 0.94],[0.93, 0.92, 0.91]]
soc_list = [[0.9, 0.5, 0.4],
[0.3, 0.2, 0.1]]
mini_soc_list = [[0.09, 0.08],
[0.07, 0.06]]
grp = 'neg'
data = d[0][5.25][0]['data']
def load_experimental():
data_list = []
for ef in exp_files:
fp = os.path.join(exp_root, ef)
data_list.append(pd.read_csv(fp))
return data_list
def get_cases():
cases = [
'1_Chen2020',
'2_Chen2020',
'5_Chen2020',
'3_Chen2020',
'4_Chen2020',
'1_Chen2020c',
'2_Chen2020c',
'5_Chen2020c',
'3_Chen2020c',
'4_Chen2020c',
'1_Chen2020b',
'2_Chen2020b',
'5_Chen2020b',
'3_Chen2020b',
'4_Chen2020b',
'1_Chen2020_third',
'2_Chen2020_third',
'5_Chen2020_third',
'3_Chen2020_third',
'4_Chen2020_third',
]
full = [base + case for case in cases]
cases = {
0: {'file': full[0], 'htc': 5, 'tabs': 1},
1: {'file': full[1], 'htc': 10, 'tabs': 1},
2: {'file': full[2], 'htc': 28, 'tabs': 1},
3: {'file': full[3], 'htc': 50, 'tabs': 1},
4: {'file': full[4], 'htc': 100, 'tabs': 1},
5: {'file': full[5], 'htc': 5, 'tabs': 2},
6: {'file': full[6], 'htc': 10, 'tabs': 2},
7: {'file': full[7], 'htc': 28, 'tabs': 2},
8: {'file': full[8], 'htc': 50, 'tabs': 2},
9: {'file': full[9], 'htc': 100, 'tabs': 2},
10: {'file': full[10], 'htc': 5, 'tabs': 5},
11: {'file': full[11], 'htc': 10, 'tabs': 5},
12: {'file': full[12], 'htc': 28, 'tabs': 5},
13: {'file': full[13], 'htc': 50, 'tabs': 5},
14: {'file': full[14], 'htc': 100, 'tabs': 5},
15: {'file': full[15], 'htc': 5, 'tabs': 1},
16: {'file': full[16], 'htc': 10, 'tabs': 1},
17: {'file': full[17], 'htc': 28, 'tabs': 1},
18: {'file': full[18], 'htc': 50, 'tabs': 1},
19: {'file': full[19], 'htc': 100, 'tabs': 1},
}
return cases
def get_case_details(key):
cases = get_cases()
return cases[key]['htc'], cases[key]['tabs']
def abc(x):
alphabet = np.array(['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p',
'q', 'r', 's', 't',
'u', 'v', 'w', 'x',
'y', 'z'])
return alphabet[x].upper()
def format_case(x, a, expanded=False, print_amps=True):
htc, tabs = get_case_details(x)
if expanded:
text = ('Case ' + abc(x) + ': h=' + str(htc) + ' [W.m-2.K-1] #tabs='
+ str(tabs).capitalize() + ': I=' + str(a) + ' [A]')
else:
if print_amps:
text = 'Case ' + abc(x) + ': I=' + str(a) + ' [A]'
else:
text = 'Case ' + abc(x)
return text
def load_all_data():
config = configparser.ConfigParser()
net = ecm.get_net()
weights = ecm.get_weights(net)
cases = get_cases()
amps = ecm.get_amp_cases()
variables = ecm.get_saved_var_names()
data = {}
for ci in range(len(cases.keys())):
case_folder = os.path.join(root, cases[ci]['file'])
data[ci] = {}
config.read(os.path.join(case_folder, 'config.txt'))
data[ci]['config'] = ecm.config2dict(config)
for amp in amps:
amp_folder = os.path.join(case_folder, str(amp) + 'A')
data[ci][amp] = {}
for vi, v in enumerate(variables):
data[ci][amp][vi] = {}
temp = ecm.load_and_amalgamate(amp_folder, v)
if temp is not None:
if vi == 0:
check_nans = np.any(np.isnan(temp), axis=1)
if np.any(check_nans):
print('Nans removed from', amp_folder)
if np.any(check_nans):
temp = temp[~check_nans, :]
data[ci][amp][vi]['data'] = temp
means = np.zeros(temp.shape[0])
for t in range(temp.shape[0]):
(mean, std_dev) = ecm.weighted_avg_and_std(temp[t, :], weights)
means[t] = mean
data[ci][amp][vi]['mean'] = means
data[ci][amp][vi]['min'] = np.min(temp, axis=1)
data[ci][amp][vi]['max'] = np.max(temp, axis=1)
if temp is not None:
t_hrs = data[ci][amp][10]['data'][:, 0]
cap = t_hrs * amp
data[ci][amp]['capacity'] = cap
return data
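# Layout of the dict returned by load_all_data(), for reference:
#   data[case][amp][var_index]['data']              -> 2-D array, time step x spatial node
#   data[case][amp][var_index]['mean'/'min'/'max']  -> per-time-step statistics
#   data[case][amp]['capacity']                     -> discharge capacity (hours * amps)
#   data[case]['config']                            -> parsed config.txt for that case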
def jellyroll_one_plot(data, title, dp=3):
input_dir = ecm.INPUT_DIR
fig, ax = plt.subplots(figsize=(12, 12))
spm_map = np.load(os.path.join(input_dir, 'im_spm_map.npz'))['arr_0']
spm_map_copy = spm_map.copy()
spm_map_copy[np.isnan(spm_map_copy)] = -1
spm_map_copy = spm_map_copy.astype(int)
mask = np.isnan(spm_map)
arr = np.ones_like(spm_map).astype(float)
arr[~mask] = data[spm_map_copy][~mask]
arr[mask] = np.nan
im = ax.imshow(arr, cmap=cm.inferno)
ax.set_axis_off()
plt.colorbar(im, ax=ax, format='%.' + str(dp) + 'f')
ax.set_title(title)
return fig
def find_best_fit(y, report_results=False):
# Set up list of candidate distributions to use
# See https://docs.scipy.org/doc/scipy/reference/stats.html for more
#y = data_spm.copy()
size = len(y)
dist_names = ['norm',
'gumbel_l',
'gumbel_r']
    # Set up empty lists to store results
chi_square = []
p_values = []
params = []
sc = StandardScaler()
yy = y.reshape(-1, 1)
sc.fit(yy)
y_std = sc.transform(yy)
y_std = y_std.flatten()
# Set up 50 bins for chi-square test
    # Observed data will be approximately evenly distributed across all bins
percentile_bins = np.linspace(0, 100, 51)
percentile_cutoffs = np.percentile(y_std, percentile_bins)
observed_frequency, bins = (np.histogram(y_std, bins=percentile_cutoffs))
cum_observed_frequency = np.cumsum(observed_frequency)
# Loop through candidate distributions
for distribution in dist_names:
# Set up distribution and get fitted distribution parameters
dist = getattr(scipy.stats, distribution)
param = dist.fit(y_std)
params.append(param)
# Obtain the KS test P statistic, round it to 5 decimal places
p = scipy.stats.kstest(y_std, distribution, args=param)[1]
p = np.around(p, 5)
p_values.append(p)
# Get expected counts in percentile bins
        # This is based on a 'cumulative distribution function' (cdf)
cdf_fitted = dist.cdf(percentile_cutoffs, *param[:-2], loc=param[-2],
scale=param[-1])
expected_frequency = []
for bin in range(len(percentile_bins) - 1):
expected_cdf_area = cdf_fitted[bin + 1] - cdf_fitted[bin]
expected_frequency.append(expected_cdf_area)
# calculate chi-squared
expected_frequency = np.array(expected_frequency) * size
cum_expected_frequency = np.cumsum(expected_frequency)
ss = sum(((cum_expected_frequency -
cum_observed_frequency) ** 2) / cum_observed_frequency)
chi_square.append(ss)
# Collate results and sort by goodness of fit (best at top)
    results = pd.DataFrame()
import datetime
import logging
import os
from datetime import timedelta
from typing import List, Tuple
import streamlit as st
import numpy as np
import pandas as pd
from joblib import load, dump
from sklearn.preprocessing import MinMaxScaler, StandardScaler
SCALERS_MODEL_PATH = os.path.join("../../models/train_features_scalers")
target_columns = ["ANNULATION", "ATTERRISSAGE", "DECOLLAGE", "DETOURNEMENT",
"HEURE D'ARRIVEE", "HEURE DE DEPART", "RAISON D'ANNULATION",
"RETARD A L'ARRIVEE", "RETARD AVION", "RETARD COMPAGNIE",
"RETARD METEO", "RETARD SECURITE", "RETARD SYSTEM", "RETART DE DEPART",
"TEMPS DE VOL", "TEMPS PASSE"]
list_features_to_scale = ['TEMPS PROGRAMME', 'DISTANCE', 'TEMPS DE DEPLACEMENT A TERRE AU DECOLLAGE',
"TEMPS DE DEPLACEMENT A TERRE A L'ATTERRISSAGE", "NOMBRE DE PASSAGERS", "PRIX DU BARIL"]
# FIXME: add type hints where they are still missing
def build_features_for_train(df_flights: pd.DataFrame, df_fuel: pd.DataFrame, features_to_scale: List[str],
path_for_scaler: str, delay_param=0) -> Tuple[
pd.DataFrame, pd.DataFrame]:
"""
Builds features for the training dataset.
"""
df_flights = add_price_fuel(df_flights, df_fuel)
df_flights = delete_irrelevant_columns(df_flights)
df_target = df_flights[target_columns]
df_without_target = df_flights.drop(columns=target_columns)
df_without_target, deleted_indexes = handle_missing_values(df_without_target)
df_target = df_target.drop(deleted_indexes).reset_index(drop=True)
add_night_flight_binary_feature(df_without_target)
df_without_target = extracting_time_features_from_date(df_without_target)
change_hour_format(df_without_target)
# Scaling
df_without_target = scale_features(df_without_target, features_to_scale, path=path_for_scaler,
is_train_dataset=True)
# Create RETARD binary target
add_delay_binary_target(df_target, delay_param=delay_param)
df_target["CATEGORIE RETARD"] = df_target["RETARD A L'ARRIVEE"].apply(lambda x: add_categorical_delay_target(x))
df_without_target = df_without_target.drop(
columns=["DEPART PROGRAMME", "ARRIVEE PROGRAMMEE", "IDENTIFIANT", "DATE", "VOL", "CODE AVION"])
return df_without_target, df_target
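# Illustrative call, assuming df_flights / df_fuel have already been loaded elsewhere:
#   X_train, y_train = build_features_for_train(df_flights, df_fuel,
#                                                list_features_to_scale,
#                                                SCALERS_MODEL_PATH, delay_param=15)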
def build_features_for_test(df_flights: pd.DataFrame, df_fuel: pd.DataFrame, features_to_scale: List[str],
path_for_scaler: str) -> pd.DataFrame:
"""
Builds features for the real-world dataset on which we wish to make our prediction.
"""
df_without_target = add_price_fuel(df_flights, df_fuel)
df_without_target = delete_irrelevant_columns(df_without_target)
df_without_target, deleted_indexes = handle_missing_values(df_without_target)
add_night_flight_binary_feature(df_without_target)
df_without_target = extracting_time_features_from_date(df_without_target)
change_hour_format(df_without_target)
# Scaling
df_without_target = scale_features(df_without_target, features_to_scale, path=path_for_scaler,
is_train_dataset=False)
# Create RETARD binary target
df_without_target = df_without_target.drop(
columns=["DEPART PROGRAMME", "ARRIVEE PROGRAMMEE", "IDENTIFIANT", "DATE", "VOL", "CODE AVION"])
return df_without_target
def build_features(df_flights: pd.DataFrame, df_fuel: pd.DataFrame, features_to_scale: List[str], path_for_scaler: str,
TRAIN_OR_TEST: str, delay_param: int=0):
"""
    Builds features for the dataset, dispatching on whether TRAIN_OR_TEST is "TRAIN" or "TEST".
"""
if TRAIN_OR_TEST == "TRAIN":
return build_features_for_train(df_flights, df_fuel, features_to_scale, path_for_scaler, delay_param)
if TRAIN_OR_TEST == "TEST":
return build_features_for_test(df_flights, df_fuel, features_to_scale, path_for_scaler)
def add_price_fuel(df_flights: pd.DataFrame, df_fuel: pd.DataFrame) -> pd.DataFrame:
"""
For each record of the flights' dataframe, adds the fuel price for the date of the flight.
"""
df_fuel["DATE"] = pd.to_datetime(df_fuel["DATE"])
df_flights = pd.merge(df_flights, df_fuel, on="DATE", how="left")
df_flights["PRIX DU BARIL"] = df_flights["PRIX DU BARIL"].fillna(df_flights["PRIX DU BARIL"].mean())
return df_flights
def add_night_flight_binary_feature(df_without_target: pd.DataFrame):
"""
For each record of the flights' dataframe, adds two binary features that indicates if it's a night flight or not.
"""
create_is_night_flight_feature('DEPART PROGRAMME', "DEPART DE NUIT", df_without_target)
create_is_night_flight_feature('ARRIVEE PROGRAMMEE', "ARRIVEE DE NUIT", df_without_target)
def create_is_night_flight_feature(feature: str, is_night_flight_feature: str, df_without_target: pd.DataFrame) -> pd.DataFrame:
"""
Adds a feature that indicates if it's a night flight or not.
"""
df_without_target[is_night_flight_feature] = 0
df_without_target.loc[
(df_without_target[feature] >= 2300) | (
df_without_target[feature] <= 600), is_night_flight_feature] = 1
return df_without_target
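# Illustrative behavior of the rule above (assuming the scheduled times are still raw
# HHMM integers at this point, i.e. before change_hour_format converts them):
#   DEPART PROGRAMME = 2330 -> DEPART DE NUIT = 1
#   DEPART PROGRAMME = 545  -> DEPART DE NUIT = 1
#   DEPART PROGRAMME = 900  -> DEPART DE NUIT = 0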
def change_hour_format(df_without_target: pd.DataFrame) -> None:
"""
Changes the departure's hour format and the arrival's hour format.
"""
df_without_target["ARRIVEE PROGRAMMEE"] = df_without_target["ARRIVEE PROGRAMMEE"].astype(str).apply(
lambda x: format_hour(x))
df_without_target["DEPART PROGRAMME"] = df_without_target["DEPART PROGRAMME"].astype(str).apply(
lambda x: format_hour(x))
def add_delay_binary_target(df_target: pd.DataFrame, delay_param: int = 0) -> None:
"""
Adds the binary delay feature.
"""
df_target["RETARD"] = 0
df_target.loc[df_target["RETARD A L'ARRIVEE"] > delay_param, 'RETARD'] = 1
def add_categorical_delay_target(retard_a_larrivee_du_vol: float) -> int:
"""
Puts the delay into 3 different categories.
"""
if retard_a_larrivee_du_vol <= 0:
return 0
elif retard_a_larrivee_du_vol <= 180:
return 1
else:
return 2
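# Illustrative mapping (the delay is interpreted in minutes, as suggested by the
# 180-minute threshold above and the "Retard <= 3h" label used below):
#   add_categorical_delay_target(-5)  -> 0  (on time or early)
#   add_categorical_delay_target(45)  -> 1  (delay of at most 3 hours)
#   add_categorical_delay_target(200) -> 2  (delay of more than 3 hours)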
def get_category_delay_target_in_string(x) -> str:
"""
Labels the delay's categories.
"""
if x == 0:
return "A l'heure"
elif x == 1:
return "Retard <= 3h"
else:
return "Retard > 3h"
def extracting_time_features_from_date(df_without_target: pd.DataFrame) -> pd.DataFrame:
"""
Extracts time features from the date and adds them to the final dataset.
"""
df_without_target['DAY OF THE WEEK'] = df_without_target['DATE'].dt.dayofweek + 1
df_without_target['WEEKEND'] = df_without_target['DAY OF THE WEEK'].apply(lambda x: check_weekend(x))
df_without_target['MONTH'] = df_without_target['DATE'].dt.month
df_without_target['DAY OF THE MONTH'] = df_without_target['DATE'].dt.day
df_without_target["HEURE DE DEPART"] = df_without_target['DEPART PROGRAMME'].apply(
lambda x: convert_time_into_datetime(x).hour)
df_without_target["HEURE D'ARRIVEE"] = df_without_target['ARRIVEE PROGRAMMEE'].apply(
lambda x: convert_time_into_datetime(x).hour)
return df_without_target
def delete_irrelevant_columns(df: pd.DataFrame) -> pd.DataFrame:
"""
Removes irrelevant features.
"""
return df.drop(columns=["NIVEAU DE SECURITE"])
def handle_missing_values(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Index]:
    """
    Drops every record with a missing value and returns the cleaned dataframe together with the dropped indexes.
    """
indexes = df.index
df = df.dropna()
deleted_indexes = indexes.difference(df.index)
return df.reset_index(drop=True), deleted_indexes
def format_hour(x: str) -> pd.Timedelta:
"""
Changes the format of the hour.
"""
while len(x) < 4:
x = '0' + x
return pd.to_timedelta(x[:-2] + ':' + x[-2:] + ':00')
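# Illustrative behavior (a sketch; the input is a scheduled time as an HHMM string):
#   format_hour("45")   -> Timedelta("00:45:00")
#   format_hour("1530") -> Timedelta("15:30:00")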
def convert_time_into_datetime(time_val):
"""
Converts string time features into datetime.
"""
if pd.isnull(time_val):
return np.nan
else:
if time_val == 2400: time_val = 0
time_val = "{0:04d}".format(int(time_val)) # transform : 0612
time_formatted = datetime.time(int(time_val[0:2]), int(time_val[2:4]))
return time_formatted
def check_weekend(x: int) -> int:
"""
Checks if the extracted day from the date is a weekend or not.
"""
return 1 if x > 5 else 0
def save_scaler(sc: StandardScaler, path: str, feature: str) -> None:
"""
Saves the scaler in a binary file.
"""
    # dump() creates or overwrites the file itself, so there is no need to pre-create it
    dump(sc, path + f'/{feature}_std_scaler.bin', compress=True)
def load_scaler(path: str, feature: str) -> StandardScaler:
    """
    Loads the scaler from a binary file.
    """
return load(path + f'/{feature}_std_scaler.bin')
def scale_feature_in_df(df: pd.DataFrame, feature: str, path: str, is_train_dataset: bool = True) -> np.ndarray:
"""
Runs a feature scaling of the given feature.
"""
if is_train_dataset:
scaler_feature = StandardScaler()
scaler_feature = scaler_feature.fit(np.array(df[feature]).reshape(-1, 1))
save_scaler(scaler_feature, path, feature)
else:
scaler_feature = load_scaler(path, feature)
return scaler_feature.transform(np.array(df[feature]).reshape(-1, 1))
def scale_features(df: pd.DataFrame, features_to_scale: List[str], path: str,
is_train_dataset: bool = True) -> pd.DataFrame:
"""
Runs the feature scaling for the given list of features.
"""
for feature in features_to_scale:
        # scale_feature_in_df returns the scaled array; assign it back, it does not modify df in place
        df[feature] = scale_feature_in_df(df, feature, path, is_train_dataset)
return df
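# Minimal sketch of the intended train/inference round trip (an illustration only; it
# assumes "./scalers" exists and is writable, and that the same path is passed at
# prediction time so the scalers fitted on the training data are reloaded, not refitted):
def _example_scale_features_round_trip():
    df_train = pd.DataFrame({"DISTANCE": [100.0, 250.0, 400.0]})
    df_new = pd.DataFrame({"DISTANCE": [180.0, 320.0]})
    df_train = scale_features(df_train, ["DISTANCE"], path="./scalers", is_train_dataset=True)
    df_new = scale_features(df_new, ["DISTANCE"], path="./scalers", is_train_dataset=False)
    return df_train, df_new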
def format_date(df_flights, df_fuel) -> pd.DataFrame:
"""
Changes the format of date feature from floats to actual dates.
"""
period_of_flights = df_flights['DATE'].max() - df_flights['DATE'].min()
scaler = MinMaxScaler()
df_fuel["DATE"] = (scaler.fit_transform(np.array(df_fuel["DATE"]).reshape(-1, 1)) * period_of_flights.days).astype(
int)
df_fuel['DATE FORMATTE'] = df_fuel.apply(lambda x: calculate_date(x, df_flights['DATE'].min()), axis=1)
return df_fuel.drop(columns="DATE")
def calculate_date(x, first_date) -> datetime:
"""
Adds a timedelta to a date.
"""
return first_date + timedelta(days=int(x['DATE']))
def main_feature_engineering(delay):
"""
Runs the feature engineering process.
"""
logging.info("Début de la lecture des datasets utilisés pour la phase d'entraînement...")
flights = pd.read_parquet("../../data/aggregated_data/vols.gzip")
    fuel = pd.read_parquet("../../data/aggregated_data/prix_fuel.gzip")
"""
prepare and save dataframe for eda, model training and evaluation :
Data Source :
deepdr : https://isbi.deepdr.org
kaggle : https://www.kaggle.com/c/aptos2019-blindness-detection/
merge two datasets :
    - deepdr dataset has : train, valid, and test datasets with both image quality and diagnosis labels
    - kaggle dataset has : train and test datasets with a diagnosis label
goal :
    - use the deepdr dataset for training the quality model
    - use the merged deepdr and kaggle datasets for training the diagnosis model
therefore we need to prepare the following dataframes -> saved under the ./output folder :
    training quality check model: (use only the deepdr dataset)
    (use original train for a train-valid split, use original valid as test --> so that we can evaluate on a held-out test set)
q_traindf : columns = ['im_path', 'im_quality']
q_testdf : columns = ['im_path', 'im_quality']
training diagnosis model: (merge deepdr and kaggle dataset)
    (merge deepdr train, deepdr valid, and kaggle train --> train-test split)
d_traindf : columns = ['im_path', 'diagnosis']
d_testdf : columns = ['im_path', 'diagnosis']
    if we want to see the kaggle score :
k_testdf : columns = ['id_code', 'diagnosis']
"""
import pandas as pd
import config
def generate_quality_df():
"""
    generate dataframes for training and evaluating the image quality model : only the deepdr dataset
    (use original train for a train-valid split, use original valid as test --> so that we can evaluate on a held-out test set)
save : ./output/q_traindf.csv and ./output/q_testdf.csv
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
test_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
print(config.PATH_DISK)
print(config.PATH_VM)
print(train_csv)
train = pd.read_csv(train_csv)
test = pd.read_csv(test_csv)
    # generate dataframe with image path and overall quality label
traindf = pd.DataFrame()
testdf = pd.DataFrame()
traindf['im_path'] = train['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/Images'+x[24:]) # mac
testdf['im_path'] = test['image_path'].apply(lambda x : x.replace('\\', '/')).apply(lambda x : f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/Images'+x[26:]) # mac
traindf['im_quality'] = train['Overall quality'].astype('str')
testdf['im_quality'] = test['Overall quality'].astype('str')
# save output
traindf.to_csv(f'{config.PATH_VM}/data/output/q_traindf.csv')
testdf.to_csv(f'{config.PATH_VM}/data/output/q_testdf.csv')
#print(f'quality : total {traindf.shape[0] + testdf.shape[0]}, train {traindf.shape[0]}, test {testdf.shape[0]}')
print('quality : total {}, train {}, test {}'.format(traindf.shape[0] + testdf.shape[0], traindf.shape[0], testdf.shape[0]))
def generate_diagnosis_df_deepdr():
"""
prepare dataframe for training diagnosis model : using deepdr data
    Note : this dataframe from the deepdr dataset will be merged with the one
    from the kaggle dataset; in the kaggle dataset train and valid images are not
    separated, therefore here we also merge train and valid. After merging with
    the kaggle dataset, train and valid will be split in the model training part.
"""
# read csv containing labels corresponding to the images
train_csv= f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-training/regular-fundus-training.csv'
valid_csv = f'{config.PATH_DISK}/data/deepdr/regular_fundus_images/regular-fundus-validation/regular-fundus-validation.csv'
train = pd.read_csv(train_csv)
    valid = pd.read_csv(valid_csv)
# encoding: utf-8
from opendatatools.common import RestAgent
from opendatatools.common import date_convert, remove_non_numerical
from bs4 import BeautifulSoup
import datetime
import json
import pandas as pd
import io
from opendatatools.futures.futures_agent import _concat_df
import zipfile
class SHExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
headers = {
"Accept": '*/*',
'Referer': 'http://www.sse.com.cn/market/sseindex/indexlist/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
}
self.add_headers(headers)
def get_index_list(self):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_ZSLB',
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_index_component(self, index):
url = 'http://query.sse.com.cn/commonSoaQuery.do'
data = {
'sqlId': 'DB_SZZSLB_CFGLB',
'indexCode' : index,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'pageHelp' in rsp:
data = rsp['pageHelp']['data']
return pd.DataFrame(data)
else:
return None
def get_dividend(self, code):
url = 'http://query.sse.com.cn/commonQuery.do'
data = {
'sqlId' : 'COMMON_SSE_GP_SJTJ_FHSG_AGFH_L_NEW',
'security_code_a' : code,
}
response = self.do_request(url, data)
rsp = json.loads(response)
if 'result' in rsp:
data = rsp['result']
return pd.DataFrame(data)
else:
return None
def get_rzrq_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://www.sse.com.cn/market/dealingdata/overview/margin/a/rzrqjygk%s.xls' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('汇总信息').dropna()
df_detail = excel.parse('明细信息').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
def get_pledge_info(self, date):
date2 = date_convert(date, '%Y-%m-%d', '%Y%m%d')
url = 'http://query.sse.com.cn/exportExcel/exportStockPledgeExcle.do?tradeDate=%s' % (date2)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
excel = pd.ExcelFile(io.BytesIO(response))
df_total = excel.parse('交易金额汇总').dropna()
df_detail = excel.parse('交易数量明细').dropna()
df_total['date'] = date
df_detail['date'] = date
return df_total, df_detail
else:
return None, None
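# Illustrative usage of SHExAgent (a sketch; it assumes the SSE endpoints queried above
# are reachable, and uses '000001' / '2018-06-01' only as example inputs):
def _demo_shex_agent():
    agent = SHExAgent()
    df_indexes = agent.get_index_list()
    df_components = agent.get_index_component('000001')
    df_rzrq_total, df_rzrq_detail = agent.get_rzrq_info('2018-06-01')
    return df_indexes, df_components, df_rzrq_total, df_rzrq_detail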
class SZExAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE' : 'xls',
'CATALOGID' : '1812',
}
response = self.do_request(url, data, method='GET', type='binary')
df = pd.read_excel(io.BytesIO(response))
return df
def get_index_component(self, index):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1747',
'ZSDM' : index
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_rzrq_info(self, date):
df_total = self._get_rzrq_total(date)
df_detail = self._get_rzrq_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
return df_total, df_detail
def _get_rzrq_total(self, date):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY' : 'tab1',
"txtDate": date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_rzrq_detail(self, date):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_xxpl',
'TABKEY': 'tab2',
"txtDate" : date,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def get_pledge_info(self, date):
df_total = self._get_pledge_info_total(date)
df_detail = self._get_pledge_info_detail(date)
if df_total is not None:
df_total['date'] = date
if df_detail is not None:
df_detail['date'] = date
df_detail['证券代码'] = df_detail['证券代码'].apply(lambda x: str(x).zfill(6))
return df_total, df_detail
def _get_pledge_info_total(self, date):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab1',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
def _get_pledge_info_detail(self, date):
url = 'http://www.szse.cn/szseWeb/ShowReport.szse'
data = {
'SHOWTYPE': 'xls',
'CATALOGID': '1837_gpzyhgxx',
'TABKEY': 'tab2',
"txtDate" : date,
'ENCODE' : 1,
}
response = self.do_request(url, data, method='GET', type='binary')
if response is not None and len(response) > 0:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class CSIAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def get_index_list(self):
url = 'http://www.csindex.com.cn/zh-CN/indices/index'
page = 1
result_data = []
while True:
data = {
"data_type" : "json",
"page" : page,
}
response = self.do_request(url, data, method='GET')
rsp = json.loads(response)
page = page + 1
print("fetching data at page %d" % (page) )
if "list" in rsp:
result_data.extend(rsp['list'])
if len(rsp['list']) == 0:
break
else:
return None
return pd.DataFrame(result_data)
def get_index_component(self, index):
url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/%scons.xls' % (index)
response = self.do_request(url, None, method='GET', type='binary')
if response is not None:
df = pd.read_excel(io.BytesIO(response))
return df
else:
return None
class XueqiuAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
# 600000.SH -> SH600000
def convert_to_xq_symbol(self, symbol):
temp = symbol.split(".")
return temp[1] + temp[0]
def convert_to_xq_symbols(self, symbols):
result = ''
for symbol in symbols.split(','):
result = result + self.convert_to_xq_symbol(symbol) + ','
return result
# SH600000 -> 600000.SH
def convert_from_xq_symbol(self, symbol):
market = symbol[0:2]
code = symbol[2:]
return code + '.' + market
def prepare_cookies(self, url):
response = self.do_request(url, None)
if response is not None:
cookies = self.get_cookies()
return cookies
else:
return None
def get_quote(self, symbols):
url = 'https://stock.xueqiu.com/v5/stock/realtime/quotec.json'
data = {
'symbol' : self.convert_to_xq_symbols(symbols)
}
# {"data":[{"symbol":"SH000001","current":3073.8321,"percent":-1.15,"chg":-35.67,"timestamp":1528427643770,"volume":6670380300,"amount":8.03515860132E10,"market_capital":1.393367880255658E13,"float_market_capital":1.254120000811718E13,"turnover_rate":0.64,"amplitude":0.91,"high":3100.6848,"low":3072.5418,"avg_price":3073.832,"trade_volume":5190400,"side":0,"is_trade":true,"level":1,"trade_session":null,"trade_type":null}],"error_code":0,"error_description":null}
response = self.do_request(url, data, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
for rsp in jsonobj['data']:
result.append( {
'time' : datetime.datetime.fromtimestamp(rsp['timestamp']/1000),
'symbol' : self.convert_from_xq_symbol(rsp['symbol']),
'high' : rsp['high'],
'low' : rsp['low'],
'last' : rsp['current'],
'change' : rsp['chg'],
'percent': rsp['percent'],
'volume' : rsp['volume'],
'amount' : rsp['amount'],
'turnover_rate' : rsp['turnover_rate'],
'market_capital' : rsp['market_capital'],
'float_market_capital' : rsp['float_market_capital'],
'is_trading' : rsp['is_trade'],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
def get_kline(self, symbol, timestamp, period, count):
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
cookies = self.prepare_cookies('https://xueqiu.com/hq')
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
result = []
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate' : rsp[8],
} )
return pd.DataFrame(result), ''
else:
return None, jsonobj['error_description']
else:
return None, '请求数据失败'
def get_kline_multisymbol(self, symbols, timestamp, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for symbol in symbols:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
def get_kline_multitimestamp(self, symbol, timestamps, period, count):
cookies = self.prepare_cookies('https://xueqiu.com/hq')
url = 'https://stock.xueqiu.com/v5/stock/chart/kline.json'
result = []
for timestamp in timestamps:
data = {
'symbol' : self.convert_to_xq_symbol(symbol),
'begin' : timestamp,
'period' : period,
'type' : 'before',
'count' : count,
'indicator' : 'kline',
}
response = self.do_request(url, data, cookies=cookies, method='GET')
if response is not None:
jsonobj = json.loads(response)
if jsonobj['error_code'] == 0:
for rsp in jsonobj['data']['item']:
result.append( {
'symbol' : symbol,
'time' : datetime.datetime.fromtimestamp(rsp[0]/1000),
'volume' : rsp[1],
'open' : rsp[2],
'high' : rsp[3],
'low' : rsp[4],
'last' : rsp[5],
'change' : rsp[6],
'percent': rsp[7],
'turnover_rate': rsp[8],
} )
return pd.DataFrame(result), ''
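# Illustrative usage of XueqiuAgent (a sketch; it assumes the Xueqiu endpoints are
# reachable, that symbols follow the '600000.SH' convention expected by
# convert_to_xq_symbols, and that 'day' is an accepted kline period):
def _demo_xueqiu_agent():
    import time
    agent = XueqiuAgent()
    df_quotes, msg = agent.get_quote('600000.SH,000001.SZ')
    now_ms = int(time.time() * 1000)  # the kline endpoint expects a millisecond timestamp
    df_kline, msg = agent.get_kline('600000.SH', now_ms, 'day', 100)
    return df_quotes, df_kline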
class SinaAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def get_adj_factor(self, symbol):
now = datetime.datetime.now()
year = now.year
month = now.month
if month < 4 :
quarter = 1
elif month < 7:
quarter = 2
elif month < 10:
quarter = 3
else:
quarter = 4
temp = symbol.split(".")
url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vMS_FuQuanMarketHistory/stockid/%s.phtml' % temp[0]
curr_year = year
curr_quarter = quarter
result_list = []
no_data_cnt = 0
while True:
print('getting data for year = %d, quarter = %d' % (curr_year, curr_quarter))
param = {
'year' : curr_year,
'jidu' : curr_quarter,
}
response = self.do_request(url, param, method='GET', encoding='gb18030')
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'tagmain' in div['class']:
tables = div.find_all('table')
for table in tables:
if table.has_attr('id') and table['id'] == 'FundHoldSharesTable':
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 8:
date = SinaAgent.clear_text(cols[0].text)
adjust_factor = SinaAgent.clear_text(cols[7].text)
if date == '日期':
continue
data.append({
"date": date,
"adjust_factor": adjust_factor,
})
result_list.extend(data)
if len(data) == 0:
no_data_cnt = no_data_cnt + 1
if no_data_cnt >= 3:
break
# prepare for next round
if curr_quarter == 1:
curr_year = curr_year - 1
curr_quarter = 4
else:
curr_quarter = curr_quarter - 1
return pd.DataFrame(result_list), ""
# 600000.SH -> SH600000
def convert_to_sina_symbol(self, symbol):
temp = symbol.split(".")
return temp[1].lower() + temp[0]
def get_trade_detail(self, symbol, trade_date):
url = 'http://market.finance.sina.com.cn/downxls.php?date=%s&symbol=%s' % (trade_date, self.convert_to_sina_symbol(symbol))
response = self.do_request(url, None, method='GET', type='text', encoding='gb18030')
if response is not None:
rsp = io.StringIO(response)
line = rsp.readline() # skip first line
line = rsp.readline()
result = []
while line is not None and len(line) > 10:
items = line.split('\t')
if len(items) == 6:
result.append({
'time' : SinaAgent.clear_text(items[0]),
'price' : SinaAgent.clear_text(items[1]),
'change' : SinaAgent.clear_text(items[2]),
'volume' : SinaAgent.clear_text(items[3]),
'turnover': SinaAgent.clear_text(items[4]),
'bs' : SinaAgent.clear_text(items[5]),
})
line = rsp.readline()
df = pd.DataFrame(result)
df['date'] = trade_date
df['symbol'] = symbol
return df, ''
return None, '获取数据失败'
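# Illustrative usage of SinaAgent (a sketch; it assumes the Sina finance pages are
# reachable and that tick data exists for the requested trading day):
def _demo_sina_agent():
    agent = SinaAgent()
    df_adj, _ = agent.get_adj_factor('600000.SH')
    df_ticks, _ = agent.get_trade_detail('600000.SH', '2018-06-01')
    return df_adj, df_ticks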
class CNInfoAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
@staticmethod
def clear_text(text):
return text.replace('\n', '').strip()
def _parse_report_file(self, file):
lines = file.readlines()
data_list = []
for i in range(len(lines)):
items = lines[i].decode('gbk').split()
if items[0][:2] == '机构':
head = items[0].split(sep=',')
else:
items = lines[i].decode('gbk')[1:]
data = items.split(sep=',')
data[0] = data[0][1:-1]
data[-1] = remove_non_numerical(data[-1])
data_list.append(data)
df = pd.DataFrame(data_list)
df.columns = head
return df
def get_report_data(self, market, symbol, type):
url = 'http://www.cninfo.com.cn/cninfo-new/data/download'
data = {
'market' : market,
'type' : type,
'code' : symbol,
'orgid' : 'gs%s%s' % (market, symbol),
'minYear' : '1990',
'maxYear' : '2018',
}
response = self.do_request(url, param=data, method='POST', type='binary')
'''if response is None:
return None, '没有获取到数据'
else:
'''
try:
zip_ref = zipfile.ZipFile(io.BytesIO(response))
df_list = []
for finfo in zip_ref.infolist():
file = zip_ref.open(finfo, 'r')
df = self._parse_report_file(file)
df_list.append(df)
df_result = _concat_df(df_list)
df_result.reset_index(inplace=True, drop=True)
return df_result, ''
except:
return None, '获取数据失败'
def get_shareholder_structure(self, market, symbol):
if symbol.startswith('002'):
board = 'sme'
elif symbol.startswith('3'):
board = 'cn'
else:
board = 'mb'
url = 'http://www.cninfo.com.cn/information/lastest/%s%s%s.html' % (market, board, symbol)
response = self.do_request(url, encoding='gb18030')
if response is None:
return None, '获取数据失败'
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
data = []
for div in divs:
if div.has_attr('class') and 'clear' in div['class']:
tables = div.find_all('table')
for table in tables:
rows = table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 2:
indicator = CNInfoAgent.clear_text(cols[0].text).replace(':', '')
value = CNInfoAgent.clear_text(cols[1].text)
data.append({
"indicator": indicator,
"value" : value,
})
break
return pd.DataFrame(data), ""
class EastMoneyAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def _parse_hist_money_flow(self, response):
jsonobj = json.loads(response)
result = []
for data in jsonobj['data']:
items = data.split(',')
result.append({
'Time': items[0],
'ZLJLRJE': items[1],
'ZLJLRZB': items[2],
'CDDJLRJE': items[3],
'CDDJLRZB': items[4],
'DDLRJE': items[5],
'DDLRZB': items[6],
'ZDLRJE': items[7],
'ZDLRZB': items[8],
'XDLRJE': items[9],
'XDLRZB': items[10],
})
        return pd.DataFrame(result)
"""Read transaction logs from AceMoney
An investment Action from AceMoney has fields
"Date", "Action", "Symbol", "Account", "Dividend", "Price",
"Quantity", "Commission", "Total", "Comment"
"""
import argparse
from datetime import datetime
import logging
import os
import pickle
from typing import Dict
import pandas as pd
import config
from config import conf
from portdash import portfolio as port
from portdash import create_app
from portdash.apis import quotes
log = logging.getLogger(__name__)
def record_inv_action(portfolio: pd.DataFrame, action: pd.Series) -> pd.DataFrame:
"""Mark the results of an AceMoney investment transaction in a portfolio
`portfolio`: pd.DataFrame
Initialized as per `init_portfolio`
`action`: pd.Series
A row from the all-investment-transactions AceMoney export
"""
port.init_symbol(portfolio, symbol=action["Symbol"])
if not pd.isnull(action["Dividend"]) and action["Dividend"] != 0:
portfolio.loc[action.Date :, f"{action.Symbol}_dist"] += action["Dividend"]
portfolio.loc[action.Date :, f"_total_dist"] += action.Dividend
    if not pd.isnull(action["Quantity"]) and action["Quantity"] != 0:
import fitbit_reader.gather_keys_oauth2 as Oauth2
import fitbit_reader.fitbit_steps_tools as tools
import fitbit
import pandas as pd
import datetime
class FitbitReader:
def __init__(
self,
client_id,
client_secret,
min_steps_for_entry_to_be_active=20,
max_contiguous_non_active_entries_for_continuous_session=3,
min_consecutive_active_entries_to_count_as_activity=10,
):
server = Oauth2.OAuth2Server(client_id, client_secret)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
self._client = fitbit.Fitbit(
client_id,
client_secret,
oauth2=True,
access_token=ACCESS_TOKEN,
refresh_token=REFRESH_TOKEN,
)
self._min_steps_for_entry_to_be_active = min_steps_for_entry_to_be_active
self._max_contiguous_non_active_entries_for_continuous_session = \
max_contiguous_non_active_entries_for_continuous_session
self._min_consecutive_active_entries_to_count_as_activity = \
min_consecutive_active_entries_to_count_as_activity
def get_total_active_steps(
self,
date='today',
steps_column="Steps",
):
steps = self.get_intraday_steps_dataframe(
date,
detail_level='1min',
steps_column=steps_column,
)
return tools.get_total_active_steps(
steps,
min_steps_for_entry_to_be_active=
self._min_steps_for_entry_to_be_active,
max_contiguous_non_active_entries_for_continuous_session=
self._max_contiguous_non_active_entries_for_continuous_session,
min_consecutive_active_entries_to_count_as_activity=
self._min_consecutive_active_entries_to_count_as_activity
)
def get_active_steps_dataframe(
self,
date='today',
steps_column="Steps",
):
steps = self.get_intraday_steps_dataframe(
date,
detail_level='1min',
steps_column=steps_column,
)
return tools.get_active_steps_dataframe(
steps,
min_steps_for_entry_to_be_active=
self._min_steps_for_entry_to_be_active,
max_contiguous_non_active_entries_for_continuous_session=
self._max_contiguous_non_active_entries_for_continuous_session,
min_consecutive_active_entries_to_count_as_activity=
self._min_consecutive_active_entries_to_count_as_activity
)
def get_active_steps_dataframes(
self,
date='today',
steps_column="Steps",
):
steps = self.get_intraday_steps_dataframe(
date,
detail_level='1min',
steps_column=steps_column,
)
return tools.get_active_steps_dataframes(
steps,
min_steps_for_entry_to_be_active=
self._min_steps_for_entry_to_be_active,
max_contiguous_non_active_entries_for_continuous_session=
self._max_contiguous_non_active_entries_for_continuous_session,
min_consecutive_active_entries_to_count_as_activity=
self._min_consecutive_active_entries_to_count_as_activity
)
def get_intraday_steps_dataframe(
self,
date='today',
detail_level='1min',
steps_column="Steps",
time_column="Time",
):
if type(date) is datetime.datetime:
date = FitbitReader.datetime_to_string(date)
steps_data = self._client.intraday_time_series(
'activities/steps',
base_date=date,
detail_level=detail_level,
)
time_list = []
val_list = []
for i in steps_data['activities-steps-intraday']['dataset']:
val_list.append(i['value'])
time_list.append(i['time'])
        return pd.DataFrame({steps_column: val_list, time_column: time_list})
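# Illustrative usage of FitbitReader (a sketch; CLIENT_ID and CLIENT_SECRET are
# placeholders for your own Fitbit OAuth2 application credentials, and constructing the
# reader opens a browser window for the OAuth2 consent flow):
def _demo_fitbit_reader():
    reader = FitbitReader(client_id="CLIENT_ID", client_secret="CLIENT_SECRET")
    steps_df = reader.get_intraday_steps_dataframe(date='today', detail_level='1min')
    total_active_steps = reader.get_total_active_steps(date='today')
    return steps_df, total_active_steps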
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
    'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
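"""tuple: A tuple containing the FERC Form 714 tables that can be successfully
integrated into PUDL.
"""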
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
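"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""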
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA Form 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into the fuel groups used in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke,
# and Other Gas.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the
# plant location before harvesting the location of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM table names (keys) and
the keyword arguments needed to read those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of glue tables linking plant and utility entities across the
EIA, FERC, and PUDL namespaces.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer - type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
        'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'longitude': float,
'mercury_content_ppm': float,
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.StringDtype(),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'original_planned_operating_date': 'datetime64[ns]',
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'other_combustion_tech': pd.BooleanDtype(),
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(), # Must preserve leading zeroes.
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'retirement_date': 'datetime64[ns]',
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'solid_fuel_gasification': pd.BooleanDtype(),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_attn': pd.StringDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': | pd.StringDtype() | pandas.StringDtype |
from pop_finder import __version__
from pop_finder import pop_finder
from pop_finder import contour_classifier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import os
import shutil
import pytest
# helper data
infile_all = "tests/test_inputs/onlyAtl_500.recode.vcf.locator.hdf5"
infile_all_vcf = "tests/test_inputs/onlyAtl_500.recode.vcf"
infile_kfcv = "tests/test_inputs/onlyAtl_500_kfcv.recode.vcf"
sample_data1 = "tests/test_inputs/onlyAtl_truelocs.txt"
sample_data2 = "tests/test_inputs/onlyAtl_truelocs_NAs.txt"
sample_data3 = "tests/test_inputs/onlyAtl_truelocs_badsamps.txt"
sample_data4 = "tests/test_inputs/onlyAtl_truelocs_3col.txt"
pred_path = "tests/test_inputs/test_out/loc_boot0_predlocs.txt"
X_train = np.load("tests/test_inputs/X_train.npy")
X_train_empty = np.zeros(shape=0)
y_train = pd.read_csv("tests/test_inputs/y_train.csv")
y_train_empty = pd.DataFrame()
X_test = np.load("tests/test_inputs/X_test.npy")
X_test_empty = np.zeros(shape=0)
y_test = | pd.read_csv("tests/test_inputs/y_test.csv") | pandas.read_csv |
#!/usr/bin/env python3
'''
Script to generate figures that demonstrate the throughput improvements gained by batching.
It either shows the speed-up in throughput, or shows throughput with and without batching side by side.
The code that plots throughput with and without batching side by side is currently commented out.
'''
import getopt
import matplotlib.pyplot as plt
import pandas as pd
import sys
from orm_classes import Base
from sqlalchemy import create_engine
BAR_PLOT_HEIGHTS = 1.5
def plot_hand_implementation_comparison(connection):
df_hand_implementation = pd.read_sql(
'Select sample_application_name, input_size, relative_deadline, worker_wcet, dop, AVG(min_period) AS min_period ' \
'FROM ThroughputWithHandImplementations ' \
'WHERE is_hand_implementation = 1 ' \
'GROUP BY sample_application_name, input_size, relative_deadline, worker_wcet, dop', connection)
df_peso = pd.read_sql(
'Select sample_application_name, input_size, relative_deadline, worker_wcet, dop, AVG(min_period) AS min_period ' \
'From ThroughputWithHandImplementations ' \
'Where is_hand_implementation = 0 ' \
'GROUP BY sample_application_name, input_size, relative_deadline, worker_wcet, dop', connection)
df = df_peso.join(
df_hand_implementation.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'dop']),
lsuffix='_peso',
rsuffix='_hand_implementation',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'dop']
)
df = df.sort_values(['sample_application_name', 'input_size'], ascending=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
sample_application_names = []
peso_throughput = []
hand_impl_throughput = []
for index, row in df.iterrows():
sample_application_names.append((row['sample_application_name'] + '\ni' + str(row['input_size']))
.replace('reduction', 'RED')
.replace('sparse_matrix_vector_multiplication', 'SMV')
.replace('dense_matrix_vector_multiplication', 'DMV'))
peso_throughput.append(row['min_period_peso'] / 1000.0)
hand_impl_throughput.append(row['min_period_hand_implementation'] / 1000.0)
'''for i in range(10):
sample_application_names.append('TBD')
peso_throughput.append(0.1)
hand_impl_throughput.append(0.1)'''
df_to_plot = pd.DataFrame({
'Peso': peso_throughput,
'Hand impl.': hand_impl_throughput},
index=sample_application_names)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_to_plot)
df_to_plot.plot(kind='bar',
figsize=(6.5, BAR_PLOT_HEIGHTS),
edgecolor='none',
color=['#0165fc', '#f1a340'],
legend=True)
ax = plt.axes()
ax.plot([1], [1])
ax.yaxis.grid()
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('#6F6F6F')
ax.spines['left'].set_color('#6F6F6F')
ax.set_ylabel('Minimum\nperiod'
' ($\mu$s)')
plt.axis('tight')
plt.ylim([0, 1.7])
plt.yticks([0, 0.85, 1.7])
plt.xlim([-ax.patches[0].get_width(), 5 + ax.patches[0].get_width()])
plt.gcf().subplots_adjust(bottom=0.4) # Make sure the x labels are not cut off
leg = plt.legend()
leg.get_frame().set_linewidth(0.0) # Remove the frame around the legend
plt.legend(bbox_to_anchor=(0.56, 1.3), loc=2, borderaxespad=0., ncol=2, frameon=False)
# plt.show()
plt.savefig('../paper/figures/eval_implementation_overhead.pdf', bbox_inches='tight')
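# Illustrative usage sketch (not part of the original script): how the plotting routine
# above might be driven from a SQLite results database. The database file name is an
# assumption made for this example.
def _example_run_hand_implementation_plot(db_path='results.db'):
    """Open the results database and regenerate the hand-implementation figure."""
    engine = create_engine(f'sqlite:///{db_path}')
    with engine.connect() as connection:
        plot_hand_implementation_comparison(connection)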
def plot_dop_model_accuracy_experiments(connection):
# Load data from the DB into pandas data frames
df_baseline = pd.read_sql('Select * From DOPModelAccuracySample Where is_oracle = 0 and sample = 1',
connection)
df_oracle = pd.read_sql('Select * From DOPModelAccuracySample Where is_oracle = 1 and sample = 1',
connection)
# Prepare data that will be plotted
df = df_baseline.join(
df_oracle.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']),
lsuffix='_baseline',
rsuffix='_oracle',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']
)
df = df.sort_values(['sample_application_name', 'input_size', 'period'], ascending=True)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
# Create data arrays
sample_application_names = []
baseline_dop = []
oracle_dop = []
for index, row in df.iterrows():
sample_application_names.append((row['sample_application_name'] + '\ni' + str(row['input_size']) + ' p' +
str(float(row['period']) / 1000.0).replace('.0', ''))
.replace('reduction', 'RED')
.replace('sparse_matrix_vector_multiplication', 'SMV')
.replace('dense_matrix_vector_multiplication', 'DMV'))
baseline_dop.append(row['dop_baseline'])
oracle_dop.append(row['dop_oracle'])
'''for i in range(5):
sample_application_names.append('TBD')
baseline_dop.append(0.1)
oracle_dop.append(0.1)'''
df_to_plot = pd.DataFrame({'Our analytical framework': baseline_dop,
'Experimental results': oracle_dop},
index=sample_application_names)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_to_plot)
df_to_plot.plot(kind='bar',
figsize=(14, BAR_PLOT_HEIGHTS),
edgecolor='none',
color=['#99d594', '#f1a340'],
legend=True)
ax = plt.axes()
    ax.plot([1], [1])  # Dummy plot call; ticks are removed below via tick_params
ax.yaxis.grid() # Show horizontal lines for better readability
ax.tick_params(axis=u'both', which=u'both', length=0) # Remove ticks
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('#6F6F6F')
ax.spines['left'].set_color('#6F6F6F')
ax.set_ylabel('Worker\ncore count')
# plt.xticks(rotation='horizontal')
plt.yticks([0, 3, 6])
plt.axis('tight') # Remove margins on the very left and very right
plt.ylim([0, 6])
plt.xlim([-ax.patches[0].get_width(), 17 + ax.patches[0].get_width()])
plt.gcf().subplots_adjust(bottom=0.4) # Make sure the x labels are not cut off
leg = plt.legend()
leg.get_frame().set_linewidth(0.0) # Remove the frame around the legend
plt.legend(bbox_to_anchor=(0.6, 1.3), loc=2, borderaxespad=0., ncol=2, frameon=False)
#plt.show()
plt.savefig('../paper/figures/dop_model_oracle_study.pdf', bbox_inches='tight')
def plot_batch_size_model_accuracy_experiments(connection):
# Load data from the DB into pandas data frames
df_baseline = pd.read_sql('Select * From BatchSizeModelAccuracySample Where is_oracle = 0 and sample = 1',
connection)
df_oracle = pd.read_sql('Select * From BatchSizeModelAccuracySample Where is_oracle = 1 and sample = 1',
connection)
# Prepare data that will be plotted i.e. the oracle and the 'our model' data set
df = df_baseline.join(
df_oracle.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']),
lsuffix='_baseline',
rsuffix='_oracle',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']
)
df = df.sort_values(['sample_application_name', 'input_size', 'period'], ascending=True)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
# Create data arrays
sample_application_names = []
baseline_batch_size = []
oracle_batch_size = []
for index, row in df.iterrows():
sample_application_names.append((row['sample_application_name'] + '\ni' + str(row['input_size']) + ' p' +
str(float(row['period']) / 1000.0).replace('.0', ''))
.replace('reduction', 'RED')
.replace('sparse_matrix_vector_multiplication', 'SMV')
.replace('dense_matrix_vector_multiplication', 'DMV'))
baseline_batch_size.append(row['batch_size_baseline'])
oracle_batch_size.append(row['batch_size_oracle'])
'''for i in range(10):
sample_application_names.append('TBD')
baseline_batch_size.append(0.1)
oracle_batch_size.append(0.1)'''
df_to_plot = pd.DataFrame({'Our analytical framework': baseline_batch_size,
'Experimental results': oracle_batch_size},
index=sample_application_names)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_to_plot)
bars = df_to_plot.plot(kind='bar',
figsize=(14, BAR_PLOT_HEIGHTS),
edgecolor='none',
color=['#99d594', '#f1a340'],
legend=False)
# fig = plt.figure(figsize=(4, 5), dpi=100)
ax = plt.axes()
    ax.plot([1], [1])  # Dummy plot call; ticks are removed below via tick_params
ax.yaxis.grid() # Show horizontal lines for better readability
ax.tick_params(axis=u'both', which=u'both', length=0) # Remove ticks
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('#6F6F6F')
ax.spines['left'].set_color('#6F6F6F')
ax.set_ylabel('Batch size')
# plt.xticks(rotation='horizontal')
plt.yticks([0, 6, 12])
plt.axis('tight') # Remove margins on the very left and very right
plt.ylim([0, 12])
plt.xlim([-ax.patches[0].get_width(), 17 + ax.patches[0].get_width()])
plt.gcf().subplots_adjust(bottom=0.4) # Make sure the x labels are not cut off
# leg = plt.legend()
# leg.get_frame().set_linewidth(0.0) # Remove the frame around the legend
# plt.legend(bbox_to_anchor=(0.6, 1.3), loc=2, borderaxespad=0., ncol=2, frameon=False)
# plt.show()
plt.savefig('../paper/figures/batch_size_model_oracle_study.pdf', bbox_inches='tight')
def plot_max_throughput_experiments(connection):
# Load data from the DB into pandas data frames
df_with_batching = | pd.read_sql('Select * From ThroughputSample Where with_batching = 1 and sample = 1;', connection) | pandas.read_sql |
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
parser.add_argument('--size',
type=int,
default=96,
                        help='Channel size. Default is 96.')
parser.add_argument('--lr',
type=float,
default=0.01,
help='Learning rate. Default is 0.01.')
parser.add_argument('--wd',
type=float,
default=0.01,
help='Regularization weight. Default is 0.01.')
parser.add_argument('--dropout',
type=float,
default=0.8,
                        help='Dropout probability. Default is 0.8.')
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating with SBMs created from labels? Default is False.')
parser.add_argument('--heads',
type=int,
default=4,
help='Attention heads. Default is 4.')
parser.add_argument('--attention_dropout',
type=float,
default=0.4,
help='Attention dropout for GAT. Default is 0.4.')
parser.add_argument('--dataset',
default="cora",
help='Dataset name. Default is cora.')
parser.add_argument('--model',
default="gcn",
help='Model name. Default is GCN.')
parser.add_argument('--splits',
type=int,
default=100,
help='Number of random train/validation/test splits. Default is 100.')
parser.add_argument('--runs',
type=int,
default=20,
help='Number of random initializations of the model. Default is 20.')
parser.add_argument('--conf_inits',
type=int,
default=10,
help='Number of configuration model runs. Default is 10.')
parser.add_argument('--sbm_inits',
type=int,
default=10,
help='Number of SBM runs. Default is 10.')
parser.add_argument('--directionality',
default='undirected',
help='Directionality: undirected/directed/reversed. Default is undirected.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
args = parser.parse_args()
return args
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads,attention_dropout,runs,splits,train_examples,val_examples, models=[MonoGAT],isDirected = False):
if isDirected:
models = [MonoGAT]
return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd, heads=heads, attention_dropout=attention_dropout,
models=models, num_runs=runs, num_splits=splits, test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoModel], isDirected=False):
if isDirected:
models = [MonoModel]
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoAPPNPModel]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoRGCN]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, runs, splits, train_examples, val_examples, isDirected):
if model == 'gat':
return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
elif model == 'rgcn' or model == 'rgcn2':
return eval_archs_rgcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
elif model == 'appnp':
return eval_archs_appnp(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
else:
return eval_archs_gcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
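# Illustrative usage sketch (not part of the original script): dispatching a single GCN
# evaluation through eval() on an already-processed dataset. The paths mirror those used
# in eval_original() below; the hyperparameters are the argparse defaults above, and the
# single run/split is chosen only to keep the example cheap.
def _example_eval_gcn(dataset_name='cora'):
    dataset = GraphDataset(f'data/tmp/{dataset_name}', dataset_name,
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
                           f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
                           directed=False, reverse=False)[0]
    return eval(model='gcn', dataset=dataset, dataset_name=dataset_name, channel_size=96,
                dropout=0.8, lr=0.01, wd=0.01, heads=4, attention_dropout=0.4,
                runs=1, splits=1, train_examples=20, val_examples=30, isDirected=False)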
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = dataset.x[torch.randperm(dataset.x.size()[0])]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# G = to_networkx(dataset)
# G = nx.DiGraph(G)
# node_communities = best_partition(nx.to_undirected(G))
# nx.set_node_attributes(G,node_communities,'label')
# # print(dataset.edge_index)
# old_edges = dataset.edge_index
# G = generate_modified_conf_model(G)
# # dir_path = f'data/graphs/cm_communities/{dataset_name}'
# # if not os.path.exists(dir_path):
# # os.mkdir(dir_path)
# # nx.write_edgelist(G, f'{dir_path}/{dataset_name}_cm_communities_{i}.cites')
# dataset.edge_index = torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)
# print((torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)-old_edges).abs().sum())
# print(dataset.edge_index)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, random_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(random_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-random{i}', dataset_name,
f'data/graphs/random/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['random_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_erdos(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, erdos_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(erdos_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-erdos{i}', dataset_name,
f'data/graphs/erdos/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['erdos_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# print(f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites')
# print(dataset.edge_index.shape)
# print(dataset.edge_index)
# if last_edge is None:
# last_edge = dataset.edge_index
# continue
# print((1-last_edge.eq(last_edge).double()).sum())
# continue
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_degree_cat(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
e = num_edges
hubs_experiment = 'global_edges'
for i in range(inits):
for frm in range(0,100,percentile):
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_degree_cat/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_constant_nodes(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for frm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio}nodes_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_constant_nodes/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_attack_target(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for atkfrm in range(0,100,percentile):
for tgtfrm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
atkto = atkfrm + percentile
tgtto = tgtfrm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio:.3f}nodes_{i}_{hubs_experiment}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}', dataset_name,
f'data/graphs/injected_edges_attack_target/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['atkfrm'] = atkfrm
df_cur['atkto'] = atkto
df_cur['tgtfrm'] = tgtfrm
df_cur['tgtto'] = tgtto
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_sbm_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_label_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples,hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-label_sbm_{hubs_experiment}', dataset_name,
f'data/graphs/label_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_conf(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, conf_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(conf_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-confmodel{i}', dataset_name,
f'data/graphs/confmodel/{dataset_name}/{dataset_name}_confmodel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['confmodel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_shifting(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, shifting_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for change in 'CL':
for inc in [True, False]:
for r in [0.16,0.32,0.64]: #[0.02,0.04,0.08]:
for i in range(shifting_inits):
output_prefix = f'data/graphs/shifting/{dataset_name}/{dataset_name}_shifting'
output_suffix = '.cites'
graph_path = f'{output_prefix}_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}'
if not os.path.exists(graph_path):
print(f'File not found: {graph_path}')
continue
dataset = GraphDataset(f'data/tmp/{dataset_name}_shifting_{change}_{"inc" if inc else "dec"}_r{r:.2f}_{i}{output_suffix}',
dataset_name, graph_path,
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph_num'] = i
df_cur['inc'] = inc
df_cur['change'] = change
df_cur['r'] = r
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}', dataset_name,
f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_sbm_label(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm_label{i}', dataset_name,
f'data/graphs/sbm_label/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['sbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modcm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modcm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modcm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modcm{i}', dataset_name,
f'data/graphs/modcm/{dataset_name}/{dataset_name}_modcm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modcm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_modsbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, modsbm_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(modsbm_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-modsbm{i}', dataset_name,
f'data/graphs/modsbm/{dataset_name}/{dataset_name}_modsbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['modsbm_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_reglabel(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, reglabel_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(reglabel_inits):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-reglabel{i}', dataset_name,
f'data/graphs/reglabel/{dataset_name}/{dataset_name}_reglabel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['reglabel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
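# The four wrappers above differ only in the randomised-graph directory, the edge-list file
# pattern and the bookkeeping column name. A minimal sketch of a shared helper follows
# (hypothetical, assuming GraphDataset and eval() behave exactly as they are used above):
def _eval_randomized_graphs(model, dataset_name, directionality, size, dropout, lr, wd, heads,
                            attention_dropout, splits, runs, train_examples, val_examples,
                            n_inits, variant, edges_path_template, id_column):
    """Hypothetical helper: `edges_path_template` is formatted with the init index i."""
    isDirected = (directionality != 'undirected')
    isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
    for i in range(n_inits):
        dataset = GraphDataset(
            f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-{variant}{i}',
            dataset_name,
            edges_path_template.format(i=i),
            f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
            directed=isDirected, reverse=isReversed)[0]
        df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size,
                      lr=lr, splits=splits, runs=runs, dropout=dropout, wd=wd, heads=heads,
                      attention_dropout=attention_dropout, train_examples=train_examples,
                      val_examples=val_examples, isDirected=isDirected)
        df_cur[id_column] = i
        df_val = pd.concat([df_val, df_cur])
    return df_val
# e.g. eval_modcm(...) corresponds to variant='modcm', id_column='modcm_num' and the matching
# 'data/graphs/modcm/...' edge-list template.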
################## Synthetic part #####################################
def load_communities(path):
with open(path, 'rb') as handle:
ret = pickle.load(handle)
return ret
def load_labels(path):
    label = {}
    with open(path, 'r') as handle:
        for line in handle:
            s = line.strip().split()
            # first whitespace-separated token is the node id, the last token is the label
            label[s[0]] = s[-1]
    return label
def agg(x):
return len(x.unique())
def calc_uncertainty(df_community,dataset_name,labeled=False,seed=0):
if dataset_name == 'cora':
df_community.label = df_community.label.apply(lambda x : ''.join([c for c in x if c.isupper()]))
if labeled:
df_community = df_community[df_community[f'labeled{seed}']]
communities = df_community.community.unique()
labels = df_community.label.unique()
mtx = df_community.pivot_table(index='community', columns='label',values='node',aggfunc=agg).fillna(0) / len(df_community)
def Pmarg(c):
return len(df_community[df_community.community == c]) / len(df_community)
def Pcond(l,c):
return mtx.loc[c,l]/Pmarg(c)
H = 0
for c in communities:
h = 0
for l in labels:
if Pcond(l,c) == 0:
continue
h += Pcond(l,c) * np.log2(1./Pcond(l,c))
H += h * Pmarg(c)
def Pl(l):
return len(df_community[df_community.label == l]) / len(df_community)
Hl = 0
for l in labels:
if Pl(l) == 0:
continue
Hl += Pl(l) * np.log2(1./Pl(l))
IG = Hl-H
return IG/Hl
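# calc_uncertainty() above returns the normalized information gain IG/H(label) between
# community membership and class label: 1.0 when communities determine labels perfectly,
# 0.0 when they are independent. A small self-checking sketch on hypothetical toy data
# (assuming pandas/numpy as imported above); the function below is illustrative and unused:
def _calc_uncertainty_toy_example():
    df_toy = pd.DataFrame({
        'node': ['a', 'b', 'c', 'd'],
        'community': [0, 0, 1, 1],
        'label': ['X', 'X', 'Y', 'Y'],
    })
    # communities and labels coincide exactly, so the normalized IG should be 1.0
    score = calc_uncertainty(df_toy, dataset_name='toy')
    assert abs(score - 1.0) < 1e-9
    return score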
def eval_sbm_swap(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, sbm_inits, is_sbm):
step = 10
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(sbm_inits if is_sbm else 1):
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
if is_sbm:
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-sbm{i}-', dataset_name,
f'data/graphs/sbm/{dataset_name}/{dataset_name}_sbm_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)
else:
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)
data = dataset[0]
community = load_communities(f'data/community_id_dicts/{dataset_name}/{dataset_name}_louvain.pickle')
mapping = data.node_name_mapping
label = load_labels(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
df_community = pd.DataFrame({'dataset':dataset_name, 'node':node, 'community':community[node], 'label':label[node]} for node in community)
df_community['node_id'] = df_community.node.apply(lambda x:mapping[x])
for seed in range(splits):
split = NetworkSplitShchur(dataset, train_examples_per_class=train_examples,early_examples_per_class=0,
val_examples_per_class=val_examples, split_seed=seed)
df_community[f'labeled{seed}'] = df_community.node_id.apply(lambda x: (split.train_mask[x]).numpy())
n = len(data.y)
# select nodes at random
shuffled = np.arange(n)
np.random.shuffle(shuffled)
row = shuffled[:int(n/2)]
col = shuffled[int(n/2):int(n/2)*2]
assert(len(row) == len(col))
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
if is_sbm:
df_cur['sbm_num'] = i
df_cur['ratio'] = 0
df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
df_cur['uncertainty_known'] = [ulc]
print(df_cur)
df_val = pd.concat([df_val, df_cur])
for ratio in range(0,100,step):
frm = int(ratio/100 * len(row))
to = int((ratio+step)/100 * len(row))
U = row[frm:to]
V = col[frm:to]
for u,v in zip(U,V):
tmp = data.x[v].detach().clone()
data.x[v] = dataset[0].x[u]
data.x[u] = tmp
tmp = data.y[v].detach().clone()
data.y[v] = dataset[0].y[u]
data.y[u] = tmp
tmp = df_community.loc[df_community.node_id == v, 'community'].values[0]
df_community.loc[df_community.node_id == v, 'community'] = df_community.loc[df_community.node_id == u, 'community'].values[0]
df_community.loc[df_community.node_id == u, 'community'] = tmp
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
if is_sbm:
df_cur['sbm_num'] = i
df_cur['ratio'] = ratio+step
df_cur['uncertainty'] = calc_uncertainty(df_community, dataset_name)
ulc = [calc_uncertainty(df_community, dataset_name, True, seed) for seed in range(splits)]
df_cur['uncertainty_known'] = [ulc]
print(df_cur)
df_val = pd.concat([df_val, df_cur])
return df_val
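# Note on eval_sbm_swap(): the node set is split into two disjoint random halves (`row` and
# `col`), and each round swaps the features and labels of a further `step` percent of the
# paired nodes while the edge structure stays fixed. Because the new values are always read
# from dataset[0] (the unmodified copy) and each node is paired at most once, after 100% every
# selected pair has exactly traded attributes. calc_uncertainty() is re-evaluated after each
# round to track how much label information the fixed community structure still carries.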
################## END: Synthetic part #####################################
def eval_flipped(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, percentages=range(10,51,10)):
print(percentages)
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in percentages:
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-flipped{i}', dataset_name,
f'data/graphs/flip_edges/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['percentage'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_removed_hubs(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, percentages=[1,2,4,8]):
print(percentages)
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in percentages:
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-removed-hubs{i}', dataset_name,
f'data/graphs/removed_hubs/{dataset_name}/{dataset_name}_{i:02}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['percentage'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_added_2hop_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, percentages=[1,2,4,8,16,32,64,128,256,512]):
print(percentages)
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in percentages:
print(f'data/graphs/processed/{dataset_name}/{dataset_name}.content')
network_path = f'data/graphs/added_2hop_edges/{dataset_name}/{dataset_name}_{i:02}.cites'
if not os.path.exists(network_path):
continue
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-added-2hops{i}', dataset_name,
network_path,
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['percentage'] = i
        df_val = pd.concat([df_val, df_cur])
    return df_val
from __future__ import division
import os
from distutils.version import LooseVersion
from collections import OrderedDict, namedtuple
import numpy as np
import deepdish
import pandas as pd
import corner
import scipy.stats
import matplotlib
import matplotlib.pyplot as plt
from . import utils
from .utils import (logger, infer_parameters_from_function,
check_directory_exists_and_if_not_mkdir)
from .prior import Prior, PriorDict, DeltaFunction
def result_file_name(outdir, label):
""" Returns the standard filename used for a result file
Parameters
----------
outdir: str
Name of the output directory
label: str
Naming scheme of the output file
Returns
-------
str: File name of the output file
"""
return '{}/{}_result.h5'.format(outdir, label)
def read_in_result(filename=None, outdir=None, label=None):
""" Read in a saved .h5 data file
Parameters
----------
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
-------
result: bilby.core.result.Result
Raises
-------
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
if filename is None:
if (outdir is None) and (label is None):
raise ValueError("No information given to load file")
else:
filename = result_file_name(outdir, label)
if os.path.isfile(filename):
return Result(**deepdish.io.load(filename))
else:
raise IOError("No result '{}' found".format(filename))
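# Usage sketch (illustrative names only): a result written by Result.save_to_file() can be
# re-loaded either via the outdir/label convention or by explicit path, e.g.
#   result = read_in_result(outdir='outdir', label='label')
#   result = read_in_result(filename='outdir/label_result.h5')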
class Result(object):
def __init__(self, label='no_label', outdir='.', sampler=None,
search_parameter_keys=None, fixed_parameter_keys=None,
priors=None, sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
log_evidence_err=np.nan, log_noise_evidence=np.nan,
log_bayes_factor=np.nan, log_likelihood_evaluations=None,
sampling_time=None, nburn=None, walkers=None,
max_autocorrelation_time=None, parameter_labels=None,
parameter_labels_with_unit=None, version=None):
""" A class to store the results of the sampling run
Parameters
----------
label, outdir, sampler: str
The label, output directory, and sampler used
search_parameter_keys, fixed_parameter_keys: list
            Lists of the search and fixed parameter keys. Elements of the
            list should be of type `str` and match the keys of the `priors`
priors: dict, bilby.core.prior.PriorDict
A dictionary of the priors used in the run
sampler_kwargs: dict
Key word arguments passed to the sampler
injection_parameters: dict
A dictionary of the injection parameters
meta_data: dict
A dictionary of meta data to store about the run
posterior: pandas.DataFrame
A pandas data frame of the posterior
samples, nested_samples: array_like
An array of the output posterior samples and the unweighted samples
log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
Natural log evidences
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
sampling_time: float
The time taken to complete the sampling
nburn: int
The number of burn-in steps discarded for MCMC samplers
walkers: array_like
            The samples taken by an ensemble MCMC sampler
max_autocorrelation_time: float
The estimated maximum autocorrelation time for MCMC samplers
parameter_labels, parameter_labels_with_unit: list
Lists of the latex-formatted parameter labels
version: str,
Version information for software used to generate the result. Note,
this information is generated when the result object is initialized
Note:
All sampling output parameters, e.g. the samples themselves are
typically not given at initialisation, but set at a later stage.
"""
self.label = label
self.outdir = os.path.abspath(outdir)
self.sampler = sampler
self.search_parameter_keys = search_parameter_keys
self.fixed_parameter_keys = fixed_parameter_keys
self.parameter_labels = parameter_labels
self.parameter_labels_with_unit = parameter_labels_with_unit
self.priors = priors
self.sampler_kwargs = sampler_kwargs
self.meta_data = meta_data
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
def __str__(self):
"""Print a summary """
if getattr(self, 'posterior', None) is not None:
if getattr(self, 'log_noise_evidence', None) is not None:
return ("nsamples: {:d}\n"
"log_noise_evidence: {:6.3f}\n"
"log_evidence: {:6.3f} +/- {:6.3f}\n"
"log_bayes_factor: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_noise_evidence, self.log_evidence,
self.log_evidence_err, self.log_bayes_factor,
self.log_evidence_err))
else:
return ("nsamples: {:d}\n"
"log_evidence: {:6.3f} +/- {:6.3f}\n"
.format(len(self.posterior), self.log_evidence, self.log_evidence_err))
else:
return ''
@property
def priors(self):
if self._priors is not None:
return self._priors
else:
raise ValueError('Result object has no priors')
@priors.setter
def priors(self, priors):
if isinstance(priors, dict):
self._priors = PriorDict(priors)
if self.parameter_labels is None:
self.parameter_labels = [self.priors[k].latex_label for k in
self.search_parameter_keys]
if self.parameter_labels_with_unit is None:
self.parameter_labels_with_unit = [
self.priors[k].latex_label_with_unit for k in
self.search_parameter_keys]
elif priors is None:
self._priors = priors
self.parameter_labels = self.search_parameter_keys
self.parameter_labels_with_unit = self.search_parameter_keys
else:
raise ValueError("Input priors not understood")
@property
def samples(self):
""" An array of samples """
if self._samples is not None:
return self._samples
else:
raise ValueError("Result object has no stored samples")
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def nested_samples(self):
"""" An array of unweighted samples """
if self._nested_samples is not None:
return self._nested_samples
else:
raise ValueError("Result object has no stored nested samples")
@nested_samples.setter
def nested_samples(self, nested_samples):
self._nested_samples = nested_samples
@property
def walkers(self):
"""" An array of the ensemble walkers """
if self._walkers is not None:
return self._walkers
else:
raise ValueError("Result object has no stored walkers")
@walkers.setter
def walkers(self, walkers):
self._walkers = walkers
@property
def nburn(self):
"""" An array of the ensemble walkers """
if self._nburn is not None:
return self._nburn
else:
raise ValueError("Result object has no stored nburn")
@nburn.setter
def nburn(self, nburn):
self._nburn = nburn
@property
def posterior(self):
""" A pandas data frame of the posterior """
if self._posterior is not None:
return self._posterior
else:
raise ValueError("Result object has no stored posterior")
@posterior.setter
def posterior(self, posterior):
self._posterior = posterior
@property
def version(self):
return self._version
@version.setter
def version(self, version):
if version is None:
self._version = 'bilby={}'.format(utils.get_version_information())
else:
self._version = version
def _get_save_data_dictionary(self):
# This list defines all the parameters saved in the result object
save_attrs = [
'label', 'outdir', 'sampler', 'log_evidence', 'log_evidence_err',
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'sampling_time', 'sampler_kwargs',
'log_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels',
'parameter_labels_with_unit', 'version']
dictionary = OrderedDict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
except ValueError as e:
logger.debug("Unable to save {}, message: {}".format(attr, e))
pass
return dictionary
def save_to_file(self, overwrite=False):
"""
Writes the Result to a deepdish h5 file
Parameters
----------
overwrite: bool, optional
Whether or not to overwrite an existing result file.
default=False
"""
file_name = result_file_name(self.outdir, self.label)
utils.check_directory_exists_and_if_not_mkdir(self.outdir)
if os.path.isfile(file_name):
if overwrite:
logger.debug('Removing existing file {}'.format(file_name))
os.remove(file_name)
else:
logger.debug(
'Renaming existing file {} to {}.old'.format(file_name,
file_name))
os.rename(file_name, file_name + '.old')
logger.debug("Saving result to {}".format(file_name))
# Convert the prior to a string representation for saving on disk
dictionary = self._get_save_data_dictionary()
if dictionary.get('priors', False):
dictionary['priors'] = {key: str(self.priors[key]) for key in self.priors}
# Convert callable sampler_kwargs to strings to avoid pickling issues
if dictionary.get('sampler_kwargs', None) is not None:
for key in dictionary['sampler_kwargs']:
if hasattr(dictionary['sampler_kwargs'][key], '__call__'):
                    dictionary['sampler_kwargs'][key] = str(dictionary['sampler_kwargs'][key])
try:
deepdish.io.save(file_name, dictionary)
except Exception as e:
logger.error("\n\n Saving the data has failed with the "
"following message:\n {} \n\n".format(e))
def save_posterior_samples(self):
"""Saves posterior samples to a file"""
filename = '{}/{}_posterior_samples.txt'.format(self.outdir, self.label)
utils.check_directory_exists_and_if_not_mkdir(self.outdir)
self.posterior.to_csv(filename, index=False, header=True)
def get_latex_labels_from_parameter_keys(self, keys):
""" Returns a list of latex_labels corresponding to the given keys
Parameters
----------
keys: list
List of strings corresponding to the desired latex_labels
Returns
-------
list: The desired latex_labels
"""
latex_labels = []
for k in keys:
if k in self.search_parameter_keys:
idx = self.search_parameter_keys.index(k)
latex_labels.append(self.parameter_labels_with_unit[idx])
elif k in self.parameter_labels:
latex_labels.append(k)
else:
logger.debug(
'key {} not a parameter label or latex label'.format(k))
latex_labels.append(' '.join(k.split('_')))
return latex_labels
@property
def covariance_matrix(self):
""" The covariance matrix of the samples the posterior """
samples = self.posterior[self.search_parameter_keys].values
return np.cov(samples.T)
@property
def posterior_volume(self):
""" The posterior volume """
if self.covariance_matrix.ndim == 0:
return np.sqrt(self.covariance_matrix)
else:
return 1 / np.sqrt(np.abs(np.linalg.det(
1 / self.covariance_matrix)))
@staticmethod
def prior_volume(priors):
""" The prior volume, given a set of priors """
return np.prod([priors[k].maximum - priors[k].minimum for k in priors])
def occam_factor(self, priors):
""" The Occam factor,
See Chapter 28, `Mackay "Information Theory, Inference, and Learning
Algorithms" <http://www.inference.org.uk/itprnn/book.html>`_ Cambridge
University Press (2003).
"""
return self.posterior_volume / self.prior_volume(priors)
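    # Note: the Occam factor penalises models whose posterior occupies only a small fraction
    # of the prior volume. Illustrative numbers: a prior volume of 100 collapsing to a
    # posterior volume of 1 gives an Occam factor of 0.01; under MacKay's approximation the
    # evidence is then roughly the best-fit likelihood multiplied by that factor.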
def get_one_dimensional_median_and_error_bar(self, key, fmt='.2f',
quantiles=[0.16, 0.84]):
""" Calculate the median and error bar for a given key
Parameters
----------
key: str
The parameter key for which to calculate the median and error bar
fmt: str, ('.2f')
A format string
quantiles: list
            A length-2 list of the lower and upper quantiles used to calculate
            the error bars.
Returns
-------
summary: namedtuple
            An object with attributes median, plus, minus and string
"""
summary = namedtuple('summary', ['median', 'lower', 'upper', 'string'])
if len(quantiles) != 2:
raise ValueError("quantiles must be of length 2")
quants_to_compute = np.array([quantiles[0], 0.5, quantiles[1]])
quants = np.percentile(self.posterior[key], quants_to_compute * 100)
summary.median = quants[1]
summary.plus = quants[2] - summary.median
summary.minus = summary.median - quants[0]
fmt = "{{0:{0}}}".format(fmt).format
string_template = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
summary.string = string_template.format(
fmt(summary.median), fmt(summary.minus), fmt(summary.plus))
return summary
def plot_single_density(self, key, prior=None, cumulative=False,
title=None, truth=None, save=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=[0.16, 0.84], dpi=300):
""" Plot a 1D marginal density, either probablility or cumulative.
Parameters
----------
key: str
Name of the parameter to plot
prior: {bool (True), bilby.core.prior.Prior}
If true, add the stored prior probability density function to the
one-dimensional marginal distributions. If instead a Prior
is provided, this will be plotted.
cumulative: bool
            If true, plot the CDF
title: bool
If true, add 1D title of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
truth: {bool, float}
If true, plot self.injection_parameters[parameter].
If float, plot this value.
save: bool:
If true, save plot to disk.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The fontsizes for the labels and titles
quantiles: list
            A length-2 list of the lower and upper quantiles used to calculate
            the error bars.
dpi: int
Dots per inch resolution of the plot
Returns
-------
figure: matplotlib.pyplot.figure
A matplotlib figure object
"""
logger.info('Plotting {} marginal distribution'.format(key))
label = self.get_latex_labels_from_parameter_keys([key])[0]
fig, ax = plt.subplots()
ax.hist(self.posterior[key].values, bins=bins, density=True,
histtype='step', cumulative=cumulative)
ax.set_xlabel(label, fontsize=label_fontsize)
if truth is not None:
ax.axvline(truth, ls='-', color='orange')
summary = self.get_one_dimensional_median_and_error_bar(
key, quantiles=quantiles)
ax.axvline(summary.median - summary.minus, ls='--', color='C0')
ax.axvline(summary.median + summary.plus, ls='--', color='C0')
if title:
ax.set_title(summary.string, fontsize=title_fontsize)
if isinstance(prior, Prior):
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
            ax.plot(theta, prior.prob(theta), color='C2')
if save:
fig.tight_layout()
if cumulative:
file_name = file_base_name + key + '_cdf'
else:
file_name = file_base_name + key + '_pdf'
fig.savefig(file_name, dpi=dpi)
return fig
def plot_marginals(self, parameters=None, priors=None, titles=True,
file_base_name=None, bins=50, label_fontsize=16,
title_fontsize=16, quantiles=[0.16, 0.84], dpi=300):
""" Plot 1D marginal distributions
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
file_base_name: str, optional
If given, the base file name to use (by default `outdir/label_` is
used)
bins: int
The number of histogram bins
label_fontsize, title_fontsize: int
The fontsizes for the labels and titles
quantiles: list
            A length-2 list of the lower and upper quantiles used to calculate
            the error bars.
dpi: int
Dots per inch resolution of the plot
Returns
-------
"""
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
truths = parameters
elif parameters is None:
plot_parameter_keys = self.posterior.keys()
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
else:
plot_parameter_keys = list(parameters)
if self.injection_parameters is None:
truths = dict()
else:
truths = self.injection_parameters
if file_base_name is None:
file_base_name = '{}/{}_1d/'.format(self.outdir, self.label)
check_directory_exists_and_if_not_mkdir(file_base_name)
if priors is True:
priors = getattr(self, 'priors', dict())
elif isinstance(priors, dict):
pass
elif priors in [False, None]:
priors = dict()
else:
raise ValueError('Input priors={} not understood'.format(priors))
for i, key in enumerate(plot_parameter_keys):
if not isinstance(self.posterior[key].values[0], float):
continue
prior = priors.get(key, None)
truth = truths.get(key, None)
for cumulative in [False, True]:
fig = self.plot_single_density(
key, prior=prior, cumulative=cumulative, title=titles,
truth=truth, save=True, file_base_name=file_base_name,
bins=bins, label_fontsize=label_fontsize, dpi=dpi,
title_fontsize=title_fontsize, quantiles=quantiles)
plt.close(fig)
def plot_corner(self, parameters=None, priors=None, titles=True, save=True,
filename=None, dpi=300, **kwargs):
""" Plot a corner-plot
Parameters
----------
parameters: (list, dict), optional
If given, either a list of the parameter names to include, or a
dictionary of parameter names and their "true" values to plot.
priors: {bool (False), bilby.core.prior.PriorDict}
If true, add the stored prior probability density functions to the
one-dimensional marginal distributions. If instead a PriorDict
is provided, this will be plotted.
titles: bool
If true, add 1D titles of the median and (by default 1-sigma)
error bars. To change the error bars, pass in the quantiles kwarg.
See method `get_one_dimensional_median_and_error_bar` for further
            details. If `quantiles=None` is passed in, no title is added.
save: bool, optional
If true, save the image using the given label and outdir
filename: str, optional
If given, overwrite the default filename
dpi: int, optional
Dots per inch resolution of the plot
**kwargs:
Other keyword arguments are passed to `corner.corner`. We set some
defaults to improve the basic look and feel, but these can all be
overridden.
Notes
-----
        The corner plots themselves are generated by the corner
python module, see https://corner.readthedocs.io for more
information.
Returns
-------
fig:
A matplotlib figure instance
"""
        # If in testing mode, no corner plots are generated
if utils.command_line_args.test:
return
# bilby default corner kwargs. Overwritten by anything passed to kwargs
defaults_kwargs = dict(
bins=50, smooth=0.9, label_kwargs=dict(fontsize=16),
title_kwargs=dict(fontsize=16), color='#0072C1',
truth_color='tab:orange', quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
plot_density=False, plot_datapoints=True, fill_contours=True,
max_n_ticks=3)
if LooseVersion(matplotlib.__version__) < "2.1":
defaults_kwargs['hist_kwargs'] = dict(normed=True)
else:
defaults_kwargs['hist_kwargs'] = dict(density=True)
if 'lionize' in kwargs and kwargs['lionize'] is True:
defaults_kwargs['truth_color'] = 'tab:blue'
defaults_kwargs['color'] = '#FF8C00'
defaults_kwargs.update(kwargs)
kwargs = defaults_kwargs
# Handle if truths was passed in
if 'truth' in kwargs:
kwargs['truths'] = kwargs.pop('truth')
if kwargs.get('truths'):
truths = kwargs.get('truths')
if isinstance(parameters, list) and isinstance(truths, list):
if len(parameters) != len(truths):
raise ValueError(
"Length of parameters and truths don't match")
elif isinstance(truths, dict) and parameters is None:
parameters = kwargs.pop('truths')
else:
raise ValueError(
"Combination of parameters and truths not understood")
        # If injection parameters were stored, use these as parameter values
# but do not overwrite input parameters (or truths)
cond1 = getattr(self, 'injection_parameters', None) is not None
cond2 = parameters is None
if cond1 and cond2:
parameters = {key: self.injection_parameters[key] for key in
self.search_parameter_keys}
# If parameters is a dictionary, use the keys to determine which
# parameters to plot and the values as truths.
if isinstance(parameters, dict):
plot_parameter_keys = list(parameters.keys())
kwargs['truths'] = list(parameters.values())
elif parameters is None:
plot_parameter_keys = self.search_parameter_keys
else:
plot_parameter_keys = list(parameters)
# Get latex formatted strings for the plot labels
kwargs['labels'] = kwargs.get(
'labels', self.get_latex_labels_from_parameter_keys(
plot_parameter_keys))
# Create the data array to plot and pass everything to corner
xs = self.posterior[plot_parameter_keys].values
fig = corner.corner(xs, **kwargs)
axes = fig.get_axes()
# Add the titles
if titles and kwargs.get('quantiles', None) is not None:
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
if ax.title.get_text() == '':
ax.set_title(self.get_one_dimensional_median_and_error_bar(
par, quantiles=kwargs['quantiles']).string,
**kwargs['title_kwargs'])
# Add priors to the 1D plots
if priors is True:
priors = getattr(self, 'priors', False)
if isinstance(priors, dict):
for i, par in enumerate(plot_parameter_keys):
ax = axes[i + i * len(plot_parameter_keys)]
theta = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 300)
ax.plot(theta, priors[par].prob(theta), color='C2')
elif priors in [False, None]:
pass
else:
raise ValueError('Input priors={} not understood'.format(priors))
if save:
if filename is None:
utils.check_directory_exists_and_if_not_mkdir(self.outdir)
filename = '{}/{}_corner.png'.format(self.outdir, self.label)
logger.debug('Saving corner plot to {}'.format(filename))
fig.savefig(filename, dpi=dpi)
return fig
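    # Usage sketch (illustrative parameter names, assuming they exist in the posterior):
    #   result.plot_corner(parameters={'mu': 0.0, 'sigma': 1.0}, priors=True)
    # restricts the plot to those two parameters, marks the supplied values as truths and,
    # because priors=True, overlays the stored prior densities on the 1D marginals.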
def plot_walkers(self, **kwargs):
""" Method to plot the trace of the walkers in an ensemble MCMC plot """
if hasattr(self, 'walkers') is False:
logger.warning("Cannot plot_walkers as no walkers are saved")
return
if utils.command_line_args.test:
return
nwalkers, nsteps, ndim = self.walkers.shape
idxs = np.arange(nsteps)
fig, axes = plt.subplots(nrows=ndim, figsize=(6, 3 * ndim))
walkers = self.walkers[:, :, :]
for i, ax in enumerate(axes):
ax.plot(idxs[:self.nburn + 1], walkers[:, :self.nburn + 1, i].T,
lw=0.1, color='r')
ax.set_ylabel(self.parameter_labels[i])
for i, ax in enumerate(axes):
ax.plot(idxs[self.nburn:], walkers[:, self.nburn:, i].T, lw=0.1,
color='k')
ax.set_ylabel(self.parameter_labels[i])
fig.tight_layout()
filename = '{}/{}_walkers.png'.format(self.outdir, self.label)
        logger.debug('Saving walkers plot to {}'.format(filename))
utils.check_directory_exists_and_if_not_mkdir(self.outdir)
fig.savefig(filename)
def plot_with_data(self, model, x, y, ndraws=1000, npoints=1000,
xlabel=None, ylabel=None, data_label='data',
data_fmt='o', draws_label=None, filename=None,
maxl_label='max likelihood', dpi=300):
""" Generate a figure showing the data and fits to the data
Parameters
----------
model: function
A python function which when called as `model(x, **kwargs)` returns
the model prediction (here `kwargs` is a dictionary of key-value
            pairs of the model parameters).
x, y: np.ndarray
The independent and dependent data to plot
ndraws: int
Number of draws from the posterior to plot
npoints: int
Number of points used to plot the smoothed fit to the data
xlabel, ylabel: str
Labels for the axes
data_label, draws_label, maxl_label: str
Label for the data, draws, and max likelihood legend
data_fmt: str
            Matplotlib fmt code, defaults to `'o'`
dpi: int
Passed to `plt.savefig`
filename: str
If given, the filename to use. Otherwise, the filename is generated
from the outdir and label attributes.
"""
# Determine model_posterior, the subset of the full posterior which
# should be passed into the model
model_keys = infer_parameters_from_function(model)
model_posterior = self.posterior[model_keys]
xsmooth = np.linspace(np.min(x), np.max(x), npoints)
fig, ax = plt.subplots()
logger.info('Plotting {} draws'.format(ndraws))
for _ in range(ndraws):
s = model_posterior.sample().to_dict('records')[0]
ax.plot(xsmooth, model(xsmooth, **s), alpha=0.25, lw=0.1, color='r',
label=draws_label)
try:
if all(~np.isnan(self.posterior.log_likelihood)):
logger.info('Plotting maximum likelihood')
s = model_posterior.iloc[self.posterior.log_likelihood.idxmax()]
ax.plot(xsmooth, model(xsmooth, **s), lw=1, color='k',
label=maxl_label)
except AttributeError:
logger.debug(
"No log likelihood values stored, unable to plot max")
ax.plot(x, y, data_fmt, markersize=2, label=data_label)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
ax.legend(numpoints=3)
fig.tight_layout()
if filename is None:
utils.check_directory_exists_and_if_not_mkdir(self.outdir)
filename = '{}/{}_plot_with_data'.format(self.outdir, self.label)
fig.savefig(filename, dpi=dpi)
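    # Usage sketch: for a model such as `def line(x, m, c): return m * x + c` (assuming 'm'
    # and 'c' are columns of the posterior) and observed arrays x, y,
    #   result.plot_with_data(line, x, y, ndraws=200)
    # overlays 200 posterior draws, the maximum-likelihood curve (if log likelihoods are
    # stored) and the data points, and saves the figure under outdir/label.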
def samples_to_posterior(self, likelihood=None, priors=None,
conversion_function=None):
"""
Convert array of samples to posterior (a Pandas data frame)
Also applies the conversion function to any stored posterior
Parameters
----------
likelihood: bilby.likelihood.GravitationalWaveTransient, optional
GravitationalWaveTransient likelihood used for sampling.
priors: dict, optional
Dictionary of prior object, used to fill in delta function priors.
conversion_function: function, optional
Function which adds in extra parameters to the data frame,
should take the data_frame, likelihood and prior as arguments.
"""
try:
data_frame = self.posterior
except ValueError:
data_frame = pd.DataFrame(
self.samples, columns=self.search_parameter_keys)
for key in priors:
if isinstance(priors[key], DeltaFunction):
data_frame[key] = priors[key].peak
elif isinstance(priors[key], float):
data_frame[key] = priors[key]
data_frame['log_likelihood'] = getattr(
self, 'log_likelihood_evaluations', np.nan)
if conversion_function is not None:
data_frame = conversion_function(data_frame, likelihood, priors)
self.posterior = data_frame
def calculate_prior_values(self, priors):
"""
Evaluate prior probability for each parameter for each sample.
Parameters
----------
priors: dict, PriorDict
Prior distributions
"""
        self.prior_values = pd.DataFrame()
# _*_ encoding:utf-8 _*_
# This script calculates index market capture by day through coingekco api
# market capture = index market cap / sum(each composition's market cap in the index )
# prerequisite:
# 1. install coingecko api python library https://github.com/man-c/pycoingecko
# 2. prepare index compositions info as a csv file which contain the info about when a coin is added
# or removed from the index and its id in coingecko. e.g. dpi_index.csv, mvi_index.csv.
# maintenance: each time a coin is added or removed from a index the csv file must change accordingly.
# result is saved as a csv file which contains the index market capture by day.
from pycoingecko import CoinGeckoAPI
import pandas as pd
import numpy as np
import time
import datetime
today = datetime.datetime.now().strftime("%Y-%m-%d")
# connect coingecko api
cg = CoinGeckoAPI()
def time_to_unix(str_time):
"""
    convert str time to unix timestamp
:param str_time: yyyy-mm-dd
:return: timestamp
"""
return time.mktime(time.strptime(str_time, "%Y-%m-%d"))
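# Note: time.mktime() interprets the parsed struct_time in the machine's *local* timezone,
# so the timestamps sent to the CoinGecko range endpoints shift with the local TZ setting;
# e.g. time_to_unix('2021-01-01') returns 1609459200.0 on a UTC-configured machine.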
def get_index_compositions_market_cap(compositions_table):
"""
get index compositions market cap by day
:param compositions_table: dataframe which contains index composition info
:return: dataframe which is index compositions marketcap by day
"""
coins_cap = pd.DataFrame(columns=['dates','coinscap','coins'])
count = 0
for coin in compositions_table.values:
coin_id = coin[4]
from_timestamp = time_to_unix(coin[2])
if coin[2] == coin[3]:
to_timestamp = time_to_unix(today)
else:
to_timestamp = time_to_unix(coin[3])
datas = cg.get_coin_market_chart_range_by_id(id=coin_id,vs_currency='usd',from_timestamp=from_timestamp,to_timestamp=to_timestamp)
# waxe has no market cap data,so use Fully Diluted Market Cap instead
if coin_id == 'waxe':
datas_df = pd.DataFrame(datas['prices'],columns=['dates','coinscap'])
datas_df['coinscap'] = datas_df['coinscap']*3700000
else:
datas_df = pd.DataFrame(datas['market_caps'],columns=['dates','coinscap'])
datas_df['coins'] = coin[1]
coins_cap=coins_cap.append(datas_df)
time.sleep(5)
count += 1
print('round %d ,get market cap of %s'%(count,coin_id))
coins_cap['days'] = pd.to_datetime(coins_cap['dates'], unit='ms').dt.date
coins_cap = coins_cap.groupby(['coins', 'days']).nth(0).reset_index()
coins_cap = coins_cap.groupby('days')['coinscap'].sum().reset_index()
return coins_cap
def get_index_market_cap(id,from_time):
"""
get index marketcap
:param id: coingekco id
:param from_time: index start time yyyy-mm-dd
:return: dataframe which contains days and marketcap
"""
from_timestamp = time_to_unix(from_time)
to_timestamp = time_to_unix(today)
index_caps = cg.get_coin_market_chart_range_by_id(id=id, vs_currency='usd',
from_timestamp=from_timestamp, to_timestamp=to_timestamp)
index_df = pd.DataFrame(index_caps['market_caps'], columns=['dates', 'index_marketcap'])
    index_df['days'] = pd.to_datetime(index_df['dates'], unit='ms')
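# The header defines market capture = index market cap / sum of composition market caps.
# The visible part of the script stops before that step; a minimal sketch of the ratio,
# assuming the two daily frames produced above (hypothetical helper, not original code):
def _market_capture_sketch(index_df, coins_cap):
    # align both frames on calendar dates before joining (assumes one row per day on each side)
    index_daily = index_df.assign(days=pd.to_datetime(index_df['days']).dt.date)
    merged = index_daily.merge(coins_cap, on='days', how='inner')
    merged['market_capture'] = merged['index_marketcap'] / merged['coinscap']
    return merged[['days', 'market_capture']]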
### backtest_factors.py ###
import pandas as pd
import numpy as np
import time
import datetime
import json
# Measures returns of feature selection parameters
def get_returns(df_result,
df_test,
forward_period,
factor_type,
factor_top_count,
minimum_sample_size,
factor_threshold,
minimum_asset_pct):
min_quantile = df_result['factor_bucket'].min()
max_quantile = df_result['factor_bucket'].max()
# Filter to minimum sample count
df_result = df_result[df_result[str(forward_period)+'_count'] >= minimum_sample_size]
# Set Factor Measure
factor_measure = str(forward_period)+'_'+factor_type
# Compute difference between max and min quantiles
df_meandiff = (df_result[df_result['factor_bucket'] == max_quantile][[factor_measure]]
- df_result[df_result['factor_bucket'] == min_quantile][[factor_measure]])
# Filter to top factors with minimum score
df_top = df_meandiff.drop_duplicates().sort_values(factor_measure, ascending=False).reset_index().groupby('asset').head(factor_top_count).sort_values(['asset',factor_measure])
df_top = df_top[df_top[factor_measure] >= factor_threshold]
df_bot = df_meandiff.drop_duplicates().sort_values(factor_measure, ascending=False).reset_index().groupby('asset').tail(factor_top_count).sort_values(['asset',factor_measure])
df_bot = df_bot[df_bot[factor_measure] <= -factor_threshold]
# Output final set of features
df_algofeatures = df_top.append(df_bot).sort_values('asset')
asset_pct = float(len(df_algofeatures['asset'].drop_duplicates()))/float(len(df_test['asset'].drop_duplicates()))
if asset_pct < minimum_asset_pct:
return None
# Join test data and chosen features
df_backtest = df_test.reset_index().merge(df_algofeatures[['asset','feature',factor_measure]],
how='inner', left_on=['asset','feature'], right_on=['asset','feature'])
# Cap scores to limit position size skew and clean infinite numbers
df_backtest.loc[df_backtest['factor_zscore'] > 3,'factor_zscore'] = 3
df_backtest.loc[df_backtest['factor_zscore'] < -3,'factor_zscore'] = -3
# Determine long/short direction of the factor
df_backtest['direction'] = df_backtest['factor_zscore']/df_backtest['factor_zscore'].abs()
# Use scores as portfolio asset weighting
df_backtest['asset_weight'] = df_backtest['factor_zscore']*df_backtest['direction']
df_backtest = df_backtest.dropna()
df_backtest = df_backtest.groupby(['date','asset'])[['asset_weight',target]].mean()
df_backtest['gross_weight'] = df_backtest['asset_weight'].abs()
df_denom = df_backtest.groupby(['date'])[['gross_weight']].sum()
df_count = df_backtest.groupby(['date'])[['asset_weight']].count()
df_backtest = df_backtest.merge(df_denom, left_index=True, right_index=True, suffixes=['','_sum'])
df_backtest = df_backtest.merge(df_count, left_index=True, right_index=True, suffixes=['','_count'])
df_backtest['portfolio_weight'] = (df_backtest['asset_weight']/(df_backtest['gross_weight_sum']))
# Add uniform index weights to compare returns
df_backtest['index_weight'] = 1.0/df_backtest['asset_weight_count']
df_backtest = df_backtest.reset_index()
# Limits to Tuesdays for rebalancing
    df_backtest['dayofweek'] = df_backtest['date'].apply(lambda x: pd.to_datetime(x).dayofweek)
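# Note: since direction = factor_zscore / |factor_zscore|, the product
# asset_weight = factor_zscore * direction is simply |factor_zscore| capped at 3, so the
# per-date portfolio weights reduce to magnitude-proportional, all-positive weights summing
# to one; e.g. capped scores [2.5, -3.0, 1.0] on one date give weights of roughly
# [0.385, 0.462, 0.154].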
import os
import pandas as pd
from tqdm import tqdm
tqdm.pandas()
import glob
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from torchtext import data
import torch.nn as nn
import torch.optim as optim
import time
import sys
import spacy
nlp = spacy.load('en_core_web_sm')
def get_train_set(data_folder, inputfile, subtask):
"""
Read training file
:param:
data_folder: folder contains the raw data
inputfile: name of the input file
subtask: either "a" or "b", else get a warning
    Return path to train data and the folder that contains the train data
"""
train_folder = data_folder+"_subtask"+subtask
if not os.path.isdir(train_folder):
os.mkdir(train_folder)
file_name = 'train.tsv'
path_to_file = train_folder+"/"+file_name
df = pd.read_table(inputfile, delimiter="\t")
    train_data = pd.DataFrame()
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import altair
import itertools
import statsmodels.api as sm
from scipy import stats
import sys
from streamlit import caching
import SessionState
import platform
import base64
from io import BytesIO
from pygam import LinearGAM, LogisticGAM, s
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from factor_analyzer import FactorAnalyzer
from factor_analyzer.factor_analyzer import calculate_bartlett_sphericity
from factor_analyzer.factor_analyzer import calculate_kmo
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
#Session state
session_state = SessionState.get(id = 0)
# Analysis type
analysis_type = st.selectbox("What kind of analysis would you like to conduct?", ["Regression", "Multi-class classification", "Data decomposition"], key = session_state.id)
st.header("**Multivariate data**")
if analysis_type == "Regression":
st.markdown("Get your data ready for powerfull methods: Artificial Neural Networks, Boosted Regression Trees, Random Forest, Generalized Additive Models, Multiple Linear Regression, and Logistic Regression! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Multi-class classification":
st.markdown("Get your data ready for powerfull multi-class classification methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
if analysis_type == "Data decomposition":
st.markdown("Decompose your data with Principal Component Analysis or Factor Analysis! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.beta_expander('Upload settings')
with separator_expander:
a4,a5=st.beta_columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = session_state.id)
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = session_state.id)
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = session_state.id)
a4,a5=st.beta_columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = session_state.id)
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = session_state.id)
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = session_state.id)
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = session_state.id)
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
else:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
df = pd.read_csv("default data/WHR_2021.csv", sep = ";|,|\t",engine='python')
df_name="WHR_2021"
if analysis_type == "Multi-class classification":
df = pd.read_csv("default data/iris.csv", sep = ";|,|\t",engine='python')
df_name="iris"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.beta_expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4)
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False)
sett_theme = st.selectbox('Theme', ["Light", "Dark"])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
session_state.id = session_state.id + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
# Check if enough data is available
if n_rows > 0 and n_cols > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
data_exploration_container = st.beta_container()
with data_exploration_container:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.beta_expander("Explore raw data info and stats ", expanded = False)
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if analysis_type == "Regression" or analysis_type == "Data decomposition":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from the Gallup World Poll surveys from 2018 to 2020. For more details see the [World Happiness Report 2021] (https://worldhappiness.report/).")
st.markdown("**Citation:**")
st.markdown("Helliwell, <NAME>., <NAME>, <NAME>, and <NAME>, eds. 2021. World Happiness Report 2021. New York: Sustainable Development Solutions Network.")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("Country")
col2.write("country name")
col1,col2=st.beta_columns(2)
col1.write("Year ")
col2.write("year ranging from 2005 to 2020")
col1,col2=st.beta_columns(2)
col1.write("Ladder")
col2.write("happiness score or subjective well-being with the best possible life being a 10, and the worst possible life being a 0")
col1,col2=st.beta_columns(2)
col1.write("Log GDP per capita")
col2.write("in purchasing power parity at constant 2017 international dollar prices")
col1,col2=st.beta_columns(2)
col1.write("Social support")
col2.write("the national average of the binary responses (either 0 or 1) to the question regarding relatives or friends to count on")
col1,col2=st.beta_columns(2)
col1.write("Healthy life expectancy at birth")
col2.write("based on the data extracted from the World Health Organization’s Global Health Observatory data repository")
col1,col2=st.beta_columns(2)
col1.write("Freedom to make life choices")
col2.write("national average of responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Generosity")
col2.write("residual of regressing national average of response to the question regarding money donations in the past month on GDP per capita")
col1,col2=st.beta_columns(2)
col1.write("Perceptions of corruption")
col2.write("the national average of the survey responses to the corresponding question")
col1,col2=st.beta_columns(2)
col1.write("Positive affect")
col2.write("the average of three positive affect measures (happiness, laugh and enjoyment)")
col1,col2=st.beta_columns(2)
col1.write("Negative affect (worry, sadness and anger)")
col2.write("the average of three negative affect measures (worry, sadness and anger)")
st.markdown("")
if analysis_type == "Multi-class classification":
if st.checkbox("Show data description", value = False, key = session_state.id):
st.markdown("**Data source:**")
st.markdown("The data come from Fisher's Iris data set. See [here] (https://archive.ics.uci.edu/ml/datasets/iris) for more information.")
st.markdown("**Citation:**")
st.markdown("<NAME>. (1936). The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7(2): 179–188. doi: [10.1111/j.1469-1809.1936.tb02137.x] (https://doi.org/10.1111%2Fj.1469-1809.1936.tb02137.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.beta_columns(2)
col1.write("class_category")
col2.write("Numerical category for 'class': Iris Setosa (0), Iris Versicolour (1), and Iris Virginica (2)")
col1,col2=st.beta_columns(2)
col1.write("class")
col2.write("Iris Setosa, Iris Versicolour, and Iris Virginica")
col1,col2=st.beta_columns(2)
col1.write("sepal length")
col2.write("sepal length in cm")
col1,col2=st.beta_columns(2)
col1.write("sepal width")
col2.write("sepal width in cm")
col1,col2=st.beta_columns(2)
col1.write("petal length")
col2.write("petal length in cm")
col1,col2=st.beta_columns(2)
col1.write("petal width")
col2.write("petal width in cm")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data ", value = False, key = session_state.id):
st.write(df)
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info ", value = False, key = session_state.id)
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info ', value = False, key = session_state.id):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data) ', value = False, key = session_state.id):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
#st.subheader("Data processing")
dev_expander_dm_sb = st.beta_expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.beta_columns(3)
else: a1, a3 = st.beta_columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = session_state.id)
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = session_state.id)
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = session_state.id)
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = session_state.id)
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete ", df.columns, key = session_state.id)
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.columns, key = session_state.id)
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin(sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows ", ["No", "Yes"], key = session_state.id)
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs ", ["No", "Yes"], key = session_state.id)
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = session_state.id)
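# Numeric variables are filtered by value ranges or comparisons; all other variables by selecting specific values.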
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = session_state.id)
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
# Apply the numeric range filter:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = session_state.id)
# Apply the numeric comparison filter:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = session_state.id)
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA ", ["No", "Yes"], key = session_state.id)
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables ", ["Mean", "Median", "Random value"], key = session_state.id)
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables ", ["Mode", "Random value"], key = session_state.id)
df = fc.data_impute(df, sb_DM_dImp_num, sb_DM_dImp_other)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.select_dtypes([np.number]).columns
numCat_options = df.columns
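# Mathematical transformations are offered for numeric columns only; numeric categorization can be applied to any column.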
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log ", transform_options, key = session_state.id)
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt ", transform_options, key = session_state.id)
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring ", transform_options, key = session_state.id)
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = session_state.id)
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization ", transform_options, key = session_state.id)
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization ", transform_options, key = session_state.id)
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = session_state.id)
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = session_state.id)
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = session_state.id)
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
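# Map each original value to its manually assigned category; missing values are kept as NaN.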
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = session_state.id)
mult_var2 = st.selectbox(text2, transform_options, key = session_state.id)
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = session_state.id)
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = session_state.id)
div_var2 = st.selectbox(text2, transform_options, key = session_state.id)
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences ', value = False, key = session_state.id):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- No row is kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
st.write("-", n_rows - n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.beta_expander("Explore cleaned and transformed data info and stats ", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 0 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data ", value = False, key = session_state.id):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed) ", value = False, key = session_state.id)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info ", value = False, key = session_state.id):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data) ', value = False, key = session_state.id):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_multi_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.beta_container()
with data_visualization_container:
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.beta_expander("Explore visualization types ", expanded = False)
with dev_expander_dv:
if df.shape[1] > 0 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = session_state.id)
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.beta_columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy = st.selectbox('Select variable for y-axis', yy_options, key = session_state.id)
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = session_state.id)
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.beta_columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred"),).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data = qqplot_data.sort_values(by = [var_sel])
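# Theoretical quantiles of a standard normal distribution (scipy's probplot) are paired with the sorted sample values.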
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# Scatter matrix
# Check if variables are numeric
num_cols=[]
for column in df:
if df[column].dtypes in ('float', 'float64', 'int','int64'):
num_cols.append(column)
if len(num_cols)>1:
show_scatter_matrix=st.checkbox('Show scatter matrix',value=False,key= session_state.id)
if show_scatter_matrix==True:
multi_var_sel = st.multiselect('Select variables for scatter matrix', num_cols, num_cols, key = session_state.id)
if len(multi_var_sel)<2:
st.error("ERROR: Please choose at least two variables fro a scatterplot")
else:
#Plot scatter matrix:
scatter_matrix=alt.Chart(df[multi_var_sel]).mark_circle().encode(
x=alt.X(alt.repeat("column"), type='quantitative'),
y=alt.Y(alt.repeat("row"), type='quantitative')
).properties(
width=150,
height=150
).repeat(
row=multi_var_sel,
column=multi_var_sel
).interactive()
st.altair_chart(scatter_matrix, use_container_width=True)
#------------------------------------------------------------------------------------------
# REGRESSION
if analysis_type == "Regression":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container = st.beta_container()
with data_machinelearning_container:
st.header("**Multivariate data modelling**")
st.markdown("Go for creating predictive models of your data using classical and machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
LR_intercept = None
LR_finalPara = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please transform the binary response variable into a numeric binary categorization in data processing preferences!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric or binary response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Non-continuous variables are treated as continuous!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
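# Explanatory variable options exclude the selected response variable.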
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms based on chosen response variable
# Binary (has to be integer or float)
if var_cat.loc[response_var] == "binary":
algorithms = ["Multiple Linear Regression", "Logistic Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "binary"
# Multi-class (has to be integer, currently treated as continuous response)
elif var_cat.loc[response_var] == "categorical":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
# Continuous
elif var_cat.loc[response_var] == "numeric":
algorithms = ["Multiple Linear Regression", "Generalized Additive Models", "Random Forest", "Boosted Regression Trees", "Artificial Neural Networks"]
response_var_type = "continuous"
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
# MLR + binary info message
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression") and response_var_type == "binary":
st.warning("WARNING: For Multiple Linear Regression only the full model output will be determined.")
st.markdown("**Model-specific settings**")
# Multiple Linear Regression settings
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
MLR_intercept = "Yes"
MLR_cov_type = "non-robust"
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
if st.checkbox("Adjust settings for Multiple Linear Regression"):
col1, col2 = st.beta_columns(2)
with col1:
MLR_intercept = st.selectbox("Include intercept", ["Yes", "No"])
with col2:
MLR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0", "HC1", "HC2", "HC3"])
MLR_finalPara["intercept"] = MLR_intercept
MLR_finalPara["covType"] = MLR_cov_type
st.write("")
# Logistic Regression settings
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
LR_intercept = "Yes"
LR_cov_type = "non-robust"
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
if st.checkbox("Adjust settings for Logistic Regression"):
col1, col2 = st.beta_columns(2)
with col1:
LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
with col2:
LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
LR_finalPara["intercept"] = LR_intercept
LR_finalPara["covType"] = LR_cov_type
st.write("")
# Generalized Additive Models settings
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
gam_finalPara["intercept"] = "Yes"
gam_finalPara["number of splines"] = 20
gam_finalPara["spline order"] = 3
gam_finalPara["lambda"] = 0.6
gam_lam_search = "No"
if st.checkbox("Adjust settings for Generalized Additive Models"):
gam_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "number of splines", "spline order", "lambda"])
col1, col2 = st.beta_columns(2)
with col1:
gam_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
gam_finalPara["intercept"] = gam_intercept
with col2:
gam_lam_search = st.selectbox("Search for lambda ", ["No", "Yes"])
if gam_lam_search == "Yes":
ls_col1, ls_col2, ls_col3 = st.beta_columns(3)
with ls_col1:
ls_min = st.number_input("Minimum lambda value", value=0.001, step=1e-3, min_value=0.001, format="%.3f")
with ls_col2:
ls_max = st.number_input("Maximum lambda value", value=100.000, step=1e-3, min_value=0.002, format="%.3f")
with ls_col3:
ls_number = st.number_input("Lambda values per variable", value=50, min_value=2)
if ls_number**len(expl_var) > 10000:
st.warning("WARNING: Your grid has " + str(ls_number**len(expl_var)) + " combinations. Please note that searching for lambda will take a lot of time!")
else:
st.info("Your grid has " + str(ls_number**len(expl_var)) + " combinations.")
if gam_lam_search == "No":
gam_col1, gam_col2, gam_col3 = st.beta_columns(3)
if gam_lam_search == "Yes":
gam_col1, gam_col2= st.beta_columns(2)
gam_nos_values = []
gam_so_values = []
gam_lam_values = []
for gset in range(0,len(expl_var)):
var_name = expl_var[gset]
with gam_col1:
nos = st.number_input("Number of splines (" + var_name + ")", value = 20, min_value=1)
gam_nos_values.append(nos)
with gam_col2:
so = st.number_input("Spline order (" + var_name + ")", value = 3, min_value=3)
gam_so_values.append(so)
if gam_lam_search == "No":
with gam_col3:
lam = st.number_input("Lambda (" + var_name + ")", value = 0.6, min_value=0.001, step=1e-3, format="%.3f")
gam_lam_values.append(lam)
if nos <= so:
st.error("ERROR: Please make sure that the number of splines is greater than the spline order for "+ str(expl_var[gset]) + "!")
return
if gam_lam_search == "Yes":
lam = np.round(np.linspace(ls_min, ls_max, ls_number),3)
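# The same grid of candidate lambda values is reused for every explanatory variable; the GAM fit downstream is assumed to search over these candidates per variable.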
if len(expl_var) == 1:
gam_lam_values = lam
else:
gam_lam_values = [lam] * len(expl_var)
gam_finalPara.at["value", "number of splines"] = gam_nos_values
gam_finalPara.at["value","spline order"] = gam_so_values
gam_finalPara.at["value","lambda"] = gam_lam_values
st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [100]
brt_finalPara["learning rate"] = [0.1]
brt_finalPara["maximum tree depth"] = [3]
brt_finalPara["sample rate"] = [1]
final_hyPara_values["brt"] = brt_finalPara
if st.checkbox("Adjust settings for Boosted Regression Trees "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_finalPara["number of trees"] = st.number_input("Number of trees ", value=100, step=1, min_value=1)
with col2:
brt_finalPara["learning rate"] = st.slider("Learning rate ", value=0.1, min_value=0.001, max_value=0.1 , step=1e-3, format="%.3f")
with col3:
brt_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=3, step=1, min_value=1, max_value=30)
with col4:
brt_finalPara["sample rate"] = st.slider("Sample rate ", value=1.0, step=0.01, min_value=0.5, max_value=1.0)
final_hyPara_values["brt"] = brt_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search", "Bayes optimization", "sequential model-based optimization"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
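# Plain grid-search evaluates every grid combination, so no iteration count is needed.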
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Boosted Regression Trees settings
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_tunePara["number of trees"] = [50, 500]
brt_tunePara["learning rate"] = [0.001, 0.010]
brt_tunePara["maximum tree depth"] = [2, 10]
brt_tunePara["sample rate"] = [0.8, 1.0]
hyPara_values["brt"] = brt_tunePara
if st.checkbox("Adjust tuning settings for Boosted Regression Trees"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
brt_tunePara["number of trees"] = st.slider("Range for number of trees", 50, 1000, [50, 500])
with col2:
brt_tunePara["learning rate"] = st.slider("Range for learning rate", 0.001, 0.1, [0.001, 0.02], step=1e-3, format="%.3f")
with col3:
brt_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth", 1, 30, [2, 10])
with col4:
brt_tunePara["sample rate"] = st.slider("Range for sample rate", 0.5, 1.0, [0.8, 1.0])
hyPara_values["brt"] = brt_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
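# Tuning ranges are stored as [min, max]; parameters without a numeric range (solver, activation function, number of hidden layers) store their candidate value(s) in "min" and "NA" in "max".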
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
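# Share of observations used for training in each validation run (the remainder is presumably held out for testing downstream).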
# Select number for validation runs
with col2:
val_runs = st.slider("Select number of validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
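# i.e. stand_x_new = (x_new - mean(x_model)) / std(x_model), keeping new data on the scale the model was fitted on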
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
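# i.e. norm_x_new = (x_new - min(x_model)) / (max(x_model) - min(x_model))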
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
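# Each new value is mapped to the category assigned to the same raw value in the modelling data; unseen values raise the error below.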
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
# Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
else:
st.info("All variables are available for predictions!")
# Check if NAs are present and delete them automatically
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
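            # From here on, df holds the NA-free modelling data restricted to var_list and
            # df_new (if provided) the prediction data restricted to the explanatory variables.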
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
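                # Download pattern used throughout this app: write the table to an in-memory Excel
                # workbook (BytesIO + xlsxwriter), base64-encode the bytes and embed them as a data
                # URI in an HTML anchor rendered via st.markdown (st.download_button is presumably
                # not available in the Streamlit version targeted here, which still uses st.beta_*).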
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
                        # Download link for new prediction data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show machine learning summary
if st.checkbox('Show a summary of machine learning settings', value = False):
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.write("Algorithms summary:")
st.write("- Models:", ', '.join(sb_ML_alg))
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
# st.write("- Multiple Linear Regression model: ", MLR_model)
st.write("- Multiple Linear Regression including intercept: ", MLR_intercept)
st.write("- Multiple Linear Regression covariance type: ", MLR_cov_type)
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.write("- Logistic Regression including intercept: ", LR_intercept)
st.write("- Logistic Regression covariance type: ", LR_cov_type)
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.write("- Generalized Additive Models parameters: ")
st.write(gam_finalPara)
if any(a for a in sb_ML_alg if a == "Random Forest") and do_hypTune == "No":
st.write("- Random Forest parameters: ")
st.write(rf_finalPara)
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees") and do_hypTune == "No":
st.write("- Boosted Regression Trees parameters: ")
st.write(brt_finalPara)
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks") and do_hypTune == "No":
st.write("- Artificial Neural Networks parameters: ")
st.write(ann_finalPara)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# Hyperparameter settings summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks" or a == "Boosted Regression Trees" or a == "Random Forest"):
st.write("Hyperparameter-tuning settings summary:")
if do_hypTune == "No":
st.write("- ", do_hypTune_no)
st.write("")
if do_hypTune == "Yes":
st.write("- Search method:", hypTune_method)
st.write("- ", hypTune_nCV, "-fold cross-validation")
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
st.write("- ", hypTune_iter, "iterations in search")
st.write("")
# Random Forest summary
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.write("Random Forest tuning settings summary:")
st.write(rf_tunePara)
# Boosted Regression Trees summary
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.write("Boosted Regression Trees tuning settings summary:")
st.write(brt_tunePara)
# Artificial Neural Networks summary
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.write("Artificial Neural Networks tuning settings summary:")
st.write(ann_tunePara.style.format({"L² regularization": "{:.5}"}))
#st.caption("** Learning rate is only used in adam")
st.write("")
# General settings summary
st.write("General settings summary:")
st.write("- Response variable type: ", response_var_type)
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run models")
st.write("")
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
            # Hyperparameter tuning
if do_hypTune == "Yes":
# Tuning
model_tuning_results = ml.model_tuning(df, sb_ML_alg, hypTune_method, hypTune_iter, hypTune_nCV, hyPara_values, response_var_type, response_var, expl_var)
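                # ml.model_tuning returns one result table per tuned algorithm; the row labelled
                # "value" holds the selected hyperparameters and is copied into the final
                # parameter tables below, which are then reused for fitting the full models.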
# Save final hyperparameters
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tuning_results = model_tuning_results["rf tuning"]
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [rf_tuning_results.loc["value"]["number of trees"]]
if [rf_tuning_results.loc["value"]["maximum tree depth"]][0] == "None":
rf_finalPara["maximum tree depth"] = None
else:
rf_finalPara["maximum tree depth"] = [rf_tuning_results.loc["value"]["maximum tree depth"]]
rf_finalPara["maximum number of features"] = [rf_tuning_results.loc["value"]["maximum number of features"]]
rf_finalPara["sample rate"] = [rf_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["rf"] = rf_finalPara
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_tuning_results = model_tuning_results["brt tuning"]
brt_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "learning rate", "maximum tree depth", "sample rate"])
brt_finalPara["number of trees"] = [brt_tuning_results.loc["value"]["number of trees"]]
brt_finalPara["learning rate"] = [brt_tuning_results.loc["value"]["learning rate"]]
brt_finalPara["maximum tree depth"] = [brt_tuning_results.loc["value"]["maximum tree depth"]]
brt_finalPara["sample rate"] = [brt_tuning_results.loc["value"]["sample rate"]]
final_hyPara_values["brt"] = brt_finalPara
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tuning_results = model_tuning_results["ann tuning"]
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"]) #"learning rate schedule", "momentum", "epsilon"])
ann_finalPara["weight optimization solver"] = [ann_tuning_results.loc["value"]["weight optimization solver"]]
ann_finalPara["maximum number of iterations"] = [ann_tuning_results.loc["value"]["maximum number of iterations"]]
ann_finalPara["activation function"] = [ann_tuning_results.loc["value"]["activation function"]]
ann_finalPara["hidden layer sizes"] = [ann_tuning_results.loc["value"]["hidden layer sizes"]]
ann_finalPara["learning rate"] = [ann_tuning_results.loc["value"]["learning rate"]]
#ann_finalPara["learning rate schedule"] = [ann_tuning_results.loc["value"]["learning rate schedule"]]
#ann_finalPara["momentum"] = [ann_tuning_results.loc["value"]["momentum"]]
ann_finalPara["L² regularization"] = [ann_tuning_results.loc["value"]["L² regularization"]]
#ann_finalPara["epsilon"] = [ann_tuning_results.loc["value"]["epsilon"]]
final_hyPara_values["ann"] = ann_finalPara
# Lambda search for GAM
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
if gam_lam_search == "Yes":
st.info("Lambda search")
my_bar = st.progress(0.0)
progress = 0
Y_data_gam = df[response_var]
X_data_gam = df[expl_var]
nos = gam_finalPara["number of splines"][0]
so = gam_finalPara["spline order"][0]
lams = gam_lam_values
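                    # pyGAM grid search over the candidate lambdas: LinearGAM for a continuous
                    # response, LogisticGAM for a binary one; the best smoothing parameter(s)
                    # found are written back into gam_finalPara as "lambda".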
if response_var_type == "continuous":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LinearGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if response_var_type == "binary":
if gam_finalPara["intercept"][0] == "Yes":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = True).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
if gam_finalPara["intercept"][0] == "No":
gam_grid = LogisticGAM(n_splines = nos, spline_order = so, fit_intercept = False).gridsearch(X_data_gam.values, Y_data_gam.values, lam=lams)
gam_finalPara.at["value", "lambda"] = gam_grid.lam
progress += 1
my_bar.progress(progress/1)
# Model validation
if do_modval == "Yes":
model_val_results = ml.model_val(df, sb_ML_alg, MLR_model, train_frac, val_runs, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara, MLR_finalPara, LR_finalPara)
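                # Validation: ml.model_val repeatedly splits the modelling data (val_runs splits
                # with ratio train_frac), presumably refitting the selected models on each training
                # part and evaluating them on the corresponding test part.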
            # Full model (fitted on the complete modelling data; run with or without predictions for new data)
if do_modprednew == "Yes":
if new_data_pred is not None:
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
if do_modprednew == "No":
df_new = pd.DataFrame()
model_full_results = ml.model_full(df, df_new, sb_ML_alg, MLR_model, MLR_finalPara, LR_finalPara, response_var_type, response_var, expl_var, final_hyPara_values, gam_finalPara)
# Success message
st.success('Models run successfully!')
else: st.error("ERROR: No data available for Modelling!")
#++++++++++++++++++++++
# ML OUTPUT
    # Show output only if models were run (no further widgets may be placed after the run button, otherwise the full page reloads)
if run_models == True:
st.write("")
st.write("")
st.header("**Model outputs**")
#--------------------------------------------------------------------------------------
# FULL MODEL OUTPUT
full_output = st.beta_expander("Full model output", expanded = False)
with full_output:
if model_full_results is not None:
st.markdown("**Correlation Matrix & 2D-Histogram**")
# Define variable selector
var_sel_cor = alt.selection_single(fields=['variable', 'variable2'], clear=False,
init={'variable': response_var, 'variable2': response_var})
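                # Linked Altair selection: clicking a cell of the correlation matrix selects a
                # variable pair and filters the 2D histogram below to that pair; it starts
                # initialised to the response variable against itself.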
# Calculate correlation data
corr_data = df[[response_var] + expl_var].corr().stack().reset_index().rename(columns={0: "correlation", 'level_0': "variable", 'level_1': "variable2"})
corr_data["correlation_label"] = corr_data["correlation"].map('{:.2f}'.format)
# Basic plot
base = alt.Chart(corr_data).encode(
x = alt.X('variable2:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12)),
y = alt.Y('variable:O', sort = None, axis = alt.Axis(title = None, labelFontSize = 12))
)
                # Correlation value labels overlaid on the heatmap
text = base.mark_text().encode(
text='correlation_label',
color = alt.condition(
alt.datum.correlation > 0.5,
alt.value('white'),
alt.value('black')
)
)
# Correlation plot
corr_plot = base.mark_rect().encode(
color = alt.condition(var_sel_cor, alt.value('#86c29c'), 'correlation:Q', legend = alt.Legend(title = "Bravais-Pearson correlation coefficient", orient = "top", gradientLength = 350), scale = alt.Scale(scheme='redblue', reverse = True, domain = [-1,1]))
).add_selection(var_sel_cor)
# Calculate values for 2d histogram
value_columns = df[[response_var] + expl_var]
df_2dbinned = pd.concat([fc.compute_2d_histogram(var1, var2, df) for var1 in value_columns for var2 in value_columns])
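                # fc.compute_2d_histogram pre-bins every pair of variables of the modelling data;
                # the chart below only filters these pre-computed bins to the selected pair.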
# 2d binned histogram plot
scat_plot = alt.Chart(df_2dbinned).transform_filter(
var_sel_cor
).mark_rect().encode(
alt.X('value2:N', sort = alt.EncodingSortField(field='raw_left_value2'), axis = alt.Axis(title = "Horizontal variable", labelFontSize = 12)),
alt.Y('value:N', axis = alt.Axis(title = "Vertical variable", labelFontSize = 12), sort = alt.EncodingSortField(field='raw_left_value', order = 'descending')),
alt.Color('count:Q', scale = alt.Scale(scheme='reds'), legend = alt.Legend(title = "Count", orient = "top", gradientLength = 350))
)
# Combine all plots
correlation_plot = alt.vconcat((corr_plot + text).properties(width = 400, height = 400), scat_plot.properties(width = 400, height = 400)).resolve_scale(color = 'independent')
corr_plot1 = (corr_plot + text).properties(width = 400, height = 400)
correlation_plot = correlation_plot.properties(padding = {"left": 50, "top": 5, "right": 5, "bottom": 50})
# hist_2d_plot = scat_plot.properties(height = 350)
if response_var_type == "continuous":
st.altair_chart(correlation_plot, use_container_width = True)
if response_var_type == "binary":
st.altair_chart(correlation_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_cor")))
st.write("")
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_partDep")))
st.write("")
# Further graphical output
fm_gam_figs4_col1, fm_gam_figs4_col2 = st.beta_columns(2)
with fm_gam_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["GAM fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_gam_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Generalized Additive Models"]
residuals_fitted_data["Fitted"] = model_full_results["GAM fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_obsResVsFit")))
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
rf_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Random Forest"]
rf_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Random Forest"]
rf_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Random Forest"]
rf_error_est.loc["Residual SE"] = model_full_results["RF Residual SE"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat")))
st.write("")
# Variable importance (via permutation)
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep")))
st.write("")
# Further graphical output
fm_rf_figs4_col1, fm_rf_figs4_col2 = st.beta_columns(2)
with fm_rf_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["RF fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_rf_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Random Forest"]
residuals_fitted_data["Fitted"] = model_full_results["RF fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_obsResVsFit")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE"], columns = ["Value"])
brt_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Boosted Regression Trees"]
brt_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Boosted Regression Trees"]
brt_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Boosted Regression Trees"]
brt_error_est.loc["Residual SE"] = model_full_results["BRT Residual SE"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat")))
st.write("")
# Training score (MSE vs. number of trees)
st.write("Training score:")
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training MSE"])
train_score["Training MSE"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training MSE", title = "training MSE", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training MSE", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
# Variable importance (via permutation)
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep")))
st.write("")
# Further graphical output
fm_brt_figs4_col1, fm_brt_figs4_col2 = st.beta_columns(2)
with fm_brt_figs4_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["BRT fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_brt_figs4_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Boosted Regression Trees"]
residuals_fitted_data["Fitted"] = model_full_results["BRT fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_obsResVsFit")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["MSE", "RMSE", "MAE", "Residual SE", "Best loss"], columns = ["Value"])
ann_error_est.loc["MSE"] = model_full_results["model comparison"].loc["MSE"]["Artificial Neural Networks"]
ann_error_est.loc["RMSE"] = model_full_results["model comparison"].loc["RMSE"]["Artificial Neural Networks"]
ann_error_est.loc["MAE"] = model_full_results["model comparison"].loc["MAE"]["Artificial Neural Networks"]
ann_error_est.loc["Residual SE"] = model_full_results["ANN Residual SE"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
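                        # The predictor grid returned for the ANN is on the standardised scale, so it
                        # is mapped back to the original units (times the sample standard deviation,
                        # plus the mean) before plotting the partial dependence.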
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep")))
st.write("")
# Further graphical output
fm_ann_figs3_col1, fm_ann_figs3_col2 = st.beta_columns(2)
with fm_ann_figs3_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["ANN fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_ann_figs3_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Artificial Neural Networks"]
residuals_fitted_data["Fitted"] = model_full_results["ANN fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_obsResVsFit")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
st.markdown("**Model comparison**")
st.write("Performance metrics:")
model_comp_sort_enable = (model_full_results["model comparison"]).transpose()
st.write(model_comp_sort_enable.style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompPerf")))
st.write("")
model_full_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_full_res.loc["min"][m] = model_full_results["residuals"][m].min()
model_full_res.loc["25%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.25)
model_full_res.loc["median"][m] = model_full_results["residuals"][m].quantile(q = 0.5)
model_full_res.loc["75%-Q"][m] = model_full_results["residuals"][m].quantile(q = 0.75)
model_full_res.loc["max"][m] = model_full_results["residuals"][m].max()
st.write("Residuals distribution:")
st.write((model_full_res).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompRes")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_comp_sort_enable.to_excel(excel_file, sheet_name="performance_metrics")
model_full_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
#-------------------------------------------------------------
# Binary response variable
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
# Regression information
fm_mlr_reg_col1, fm_mlr_reg_col2 = st.beta_columns(2)
with fm_mlr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["MLR information"].style.set_precision(user_precision))
# Regression statistics
with fm_mlr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["MLR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["MLR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_coef")))
st.write("")
# ANOVA
st.write("ANOVA:")
st.table(model_full_results["MLR ANOVA"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_ANOVA")))
st.write("")
# Heteroskedasticity tests
if MLR_intercept == "Yes":
st.write("Heteroskedasticity tests:")
st.table(model_full_results["MLR hetTest"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_hetTest")))
st.write("")
# Variable importance (via permutation)
fm_mlr_reg2_col1, fm_mlr_reg2_col2 = st.beta_columns(2)
with fm_mlr_reg2_col1:
st.write("Variable importance (via permutation):")
mlr_varImp_table = model_full_results["MLR variable importance"]
st.table(mlr_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_mlr_reg2_col2:
st.write("")
st.write("")
st.write("")
mlr_varImp_plot_data = model_full_results["MLR variable importance"]
mlr_varImp_plot_data["Variable"] = mlr_varImp_plot_data.index
mlr_varImp = alt.Chart(mlr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(mlr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_varImp")))
st.write("")
# Graphical output
fm_mlr_figs_col1, fm_mlr_figs_col2 = st.beta_columns(2)
with fm_mlr_figs_col1:
st.write("Observed vs Fitted:")
observed_fitted_data = pd.DataFrame()
observed_fitted_data["Observed"] = df[response_var]
observed_fitted_data["Fitted"] = model_full_results["MLR fitted"]
observed_fitted_data["Index"] = df.index
observed_fitted = alt.Chart(observed_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(observed_fitted_data["Fitted"]), max(observed_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "Fitted", "Index"]
)
observed_fitted_plot = observed_fitted + observed_fitted.transform_regression("Fitted", "Observed").mark_line(size = 2, color = "darkred")
st.altair_chart(observed_fitted_plot, use_container_width = True)
with fm_mlr_figs_col2:
st.write("Residuals vs Fitted:")
residuals_fitted_data = pd.DataFrame()
residuals_fitted_data["Residuals"] = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_fitted_data["Fitted"] = model_full_results["MLR fitted"]
residuals_fitted_data["Index"] = df.index
residuals_fitted = alt.Chart(residuals_fitted_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(residuals_fitted_data["Fitted"]), max(residuals_fitted_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Residuals", title = "residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Residuals", "Fitted", "Index"]
)
residuals_fitted_plot = residuals_fitted + residuals_fitted.transform_loess("Fitted", "Residuals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_fitted_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_obsResVsFit")))
st.write("")
fm_mlr_figs1_col1, fm_mlr_figs1_col2 = st.beta_columns(2)
with fm_mlr_figs1_col1:
st.write("Normal QQ-plot:")
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
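# stats.probplot returns theoretical quantiles in ascending order, so the residuals are sorted first and the quantiles are paired positionally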
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_qqplot")))
with fm_mlr_figs1_col2:
st.write("Scale-Location:")
scale_location_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
scale_location_data["SqrtStandResiduals"] = np.sqrt(abs((residuals - residuals.mean())/residuals.std()))
scale_location_data["Fitted"] = model_full_results["MLR fitted"]
scale_location_data["Index"] = df.index
scale_location = alt.Chart(scale_location_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Fitted", title = "fitted", scale = alt.Scale(domain = [min(scale_location_data["Fitted"]), max(scale_location_data["Fitted"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("SqrtStandResiduals", title = "sqrt(|stand. residuals|)", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["SqrtStandResiduals", "Fitted", "Index"]
)
scale_location_plot = scale_location + scale_location.transform_loess("Fitted", "SqrtStandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(scale_location_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_scaleLoc")))
st.write("")
fm_mlr_figs2_col1, fm_mlr_figs2_col2 = st.beta_columns(2)
with fm_mlr_figs2_col1:
st.write("Residuals vs Leverage:")
residuals_leverage_data = pd.DataFrame()
residuals = model_full_results["residuals"]["Multiple Linear Regression"]
residuals_leverage_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
residuals_leverage_data["Leverage"] = model_full_results["MLR leverage"]
residuals_leverage_data["Index"] = df.index
residuals_leverage = alt.Chart(residuals_leverage_data, height = 200).mark_circle(size=20).encode(
x = alt.X("Leverage", title = "leverage", scale = alt.Scale(domain = [min(residuals_leverage_data["Leverage"]), max(residuals_leverage_data["Leverage"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals","Leverage", "Index"]
)
residuals_leverage_plot = residuals_leverage + residuals_leverage.transform_loess("Leverage", "StandResiduals", bandwidth = 0.5).mark_line(size = 2, color = "darkred")
st.altair_chart(residuals_leverage_plot, use_container_width = True)
with fm_mlr_figs2_col2:
st.write("Cook's distance:")
cooksD_data = pd.DataFrame()
cooksD_data["CooksD"] = model_full_results["MLR Cooks distance"]
cooksD_data["Index"] = df.index
cooksD = alt.Chart(cooksD_data, height = 200).mark_bar(size = 2).encode(
x = alt.X("Index", title = "index", scale = alt.Scale(domain = [-1, max(cooksD_data["Index"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("CooksD", title = "Cook's distance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["CooksD", "Index"]
)
st.altair_chart(cooksD, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_MLR_resVsLev_cooksD")))
# Download link for MLR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["MLR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["MLR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["MLR coefficients"].to_excel(excel_file, sheet_name="coefficients")
model_full_results["MLR ANOVA"].to_excel(excel_file, sheet_name="ANOVA")
model_full_results["MLR hetTest"].to_excel(excel_file, sheet_name="heteroskedasticity_tests")
mlr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "MLR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Multiple Linear Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# LR specific output
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.markdown("**Logistic Regression**")
# Regression information
fm_lr_reg_col1, fm_lr_reg_col2 = st.beta_columns(2)
with fm_lr_reg_col1:
st.write("Regression information:")
st.table(model_full_results["LR information"].style.set_precision(user_precision))
# Regression statistics
with fm_lr_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["LR statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_regStat")))
st.write("")
# Coefficients
st.write("Coefficients:")
st.table(model_full_results["LR coefficients"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_coef")))
st.write("")
# Variable importance (via permutation)
fm_lr_fig1_col1, fm_lr_fig1_col2 = st.beta_columns(2)
with fm_lr_fig1_col1:
st.write("Variable importance (via permutation):")
lr_varImp_table = model_full_results["LR variable importance"]
st.table(lr_varImp_table.style.set_precision(user_precision))
with fm_lr_fig1_col2:
st.write("")
st.write("")
st.write("")
lr_varImp_plot_data = model_full_results["LR variable importance"]
lr_varImp_plot_data["Variable"] = lr_varImp_plot_data.index
lr_varImp = alt.Chart(lr_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(lr_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_varImp")))
st.write("")
fm_lr_fig_col1, fm_lr_fig_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_lr_fig_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["LR fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 2, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_lr_fig_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["LR ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["LR ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Logistic Regression"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Logistic Regression"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
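# diagonal 1:1 reference line (expected ROC curve of a random classifier)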
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_thresAUC")))
st.write("")
# Partial probabilities
st.write("Partial probability plots:")
fm_lr_figs2_col1, fm_lr_figs2_col2 = st.beta_columns(2)
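# one partial-probability curve per explanatory variable, with observed outcomes shown as ticks; plots alternate between the two columns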
for pp_var in expl_var:
pp_data = pd.DataFrame(columns = [pp_var])
pp_data[pp_var] = model_full_results["LR partial probabilities"][pp_var][pp_var]
pp_data["ProbabilityOfOccurrence"] = model_full_results["LR partial probabilities"][pp_var]["prediction"]
pp_data["Observed"] = df[response_var]
pp_chart = alt.Chart(pp_data, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("ProbabilityOfOccurrence", title = "probability of occurrence", scale = alt.Scale(domain = [0, 1]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["ProbabilityOfOccurrence"] + [pp_var]
)
obs_data_plot = alt.Chart(pp_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pp_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence"] + [pp_var]
)
if expl_var.index(pp_var)%2 == 0:
with fm_lr_figs2_col1:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if expl_var.index(pp_var)%2 == 1:
with fm_lr_figs2_col2:
st.altair_chart(pp_chart + obs_data_plot, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_LR_partProb")))
# Download link for LR output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["LR information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["LR statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["LR coefficients"].to_excel(excel_file, sheet_name="coefficients")
lr_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "LR full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Logistic Regression full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
fm_gam_reg_col1, fm_gam_reg_col2 = st.beta_columns(2)
# Regression information
with fm_gam_reg_col1:
st.write("Regression information:")
st.table(model_full_results["GAM information"].style.set_precision(user_precision))
# Regression statistics
with fm_gam_reg_col2:
st.write("Regression statistics:")
st.table(model_full_results["GAM statistics"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_regStat_bin")))
st.write("")
# Feature significance
st.write("Feature significance:")
st.table(model_full_results["GAM feature significance"].style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_featSig_bin")))
st.write("")
# Variable importance (via permutation)
fm_gam_figs1_col1, fm_gam_figs1_col2 = st.beta_columns(2)
with fm_gam_figs1_col1:
st.write("Variable importance (via permutation):")
gam_varImp_table = model_full_results["GAM variable importance"]
st.table(gam_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_gam_figs1_col2:
st.write("")
st.write("")
st.write("")
gam_varImp_plot_data = model_full_results["GAM variable importance"]
gam_varImp_plot_data["Variable"] = gam_varImp_plot_data.index
gam_varImp = alt.Chart(gam_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(gam_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_varImp_bin")))
st.write("")
# Observed vs. Probability of Occurrence
fm_gam_figs5_col1, fm_gam_figs5_col2 = st.beta_columns(2)
with fm_gam_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["GAM fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[0]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Generalized Additive Models"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_gam_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["GAM ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["GAM ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Generalized Additive Models"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Generalized Additive Models"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_GAM_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_gam_figs3_col1, fm_gam_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_gam = pd.DataFrame(columns = [pd_var])
pd_data_gam[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam["Partial dependence"] = model_full_results["GAM partial dependence"][pd_var]["pd_values"]
pd_data_gam["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_data_gam["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam = alt.Chart(pd_data_gam, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%", "Partial dependence", "Lower 95%"] + [pd_var]
)
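# rug of observed predictor values, drawn along the bottom of the partial-dependence y-range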
pd_data_ticks_gam = pd.DataFrame(columns = [pd_var])
pd_data_ticks_gam[pd_var] = df[pd_var]
pd_data_ticks_gam["y"] = [model_full_results["GAM partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_gam = alt.Chart(pd_data_ticks_gam, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_gam[pd_var].min(), pd_data_ticks_gam[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
pd_data_gam_lower = pd.DataFrame(columns = [pd_var])
pd_data_gam_lower[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_lower["Lower 95%"] = model_full_results["GAM partial dependence"][pd_var]["lower_95"]
pd_chart_gam_lower = alt.Chart(pd_data_gam_lower, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Lower 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Lower 95%"] + [pd_var]
)
pd_data_gam_upper = pd.DataFrame(columns = [pd_var])
pd_data_gam_upper[pd_var] = model_full_results["GAM partial dependence"][pd_var]["x_values"]
pd_data_gam_upper["Upper 95%"] = model_full_results["GAM partial dependence"][pd_var]["upper_95"]
pd_chart_gam_upper = alt.Chart(pd_data_gam_upper, height = 200).mark_line(strokeDash=[1,1], color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Upper 95%", title = "", scale = alt.Scale(domain = [model_full_results["GAM partial dependence min/max"]["min"].min(), model_full_results["GAM partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Upper 95%"] + [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_gam_figs3_col1:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_gam_figs3_col2:
st.altair_chart(pd_ticks_gam + pd_chart_gam_lower + pd_chart_gam_upper + pd_chart_gam, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep_bin")))
st.write("")
# Download link for GAM output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["GAM information"].to_excel(excel_file, sheet_name="regression_information")
model_full_results["GAM statistics"].to_excel(excel_file, sheet_name="regression_statistics")
model_full_results["GAM feature significance"].to_excel(excel_file, sheet_name="feature_significance")
gam_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "GAM full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Generalized Additive Models full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
fm_rf_reg_col1, fm_rf_reg_col2 = st.beta_columns(2)
# Regression information
with fm_rf_reg_col1:
st.write("Regression information:")
st.table(model_full_results["RF information"].style.set_precision(user_precision))
# Regression statistics
with fm_rf_reg_col2:
st.write("Regression statistics:")
rf_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS"], columns = ["Value"])
rf_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Random Forest"]
rf_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Random Forest"]
rf_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Random Forest"]
rf_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Random Forest"]
st.table(rf_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_regStat_bin")))
st.write("")
fm_rf_figs1_col1, fm_rf_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_rf_figs1_col1:
st.write("Variable importance (via permutation):")
rf_varImp_table = model_full_results["RF variable importance"]
st.table(rf_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_rf_figs1_col2:
st.write("")
st.write("")
st.write("")
rf_varImp_plot_data = model_full_results["RF variable importance"]
rf_varImp_plot_data["Variable"] = rf_varImp_plot_data.index
rf_varImp = alt.Chart(rf_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(rf_varImp, use_container_width = True)
st.write("")
fm_rf_figs2_col1, fm_rf_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_rf_figs2_col1:
st.write("Feature importance (impurity-based):")
rf_featImp_table = model_full_results["RF feature importance"]
st.table(rf_featImp_table.style.set_precision(user_precision))
with fm_rf_figs2_col2:
st.write("")
st.write("")
st.write("")
rf_featImp_plot_data = model_full_results["RF feature importance"]
rf_featImp_plot_data["Variable"] = rf_featImp_plot_data.index
rf_featImp = alt.Chart(rf_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(rf_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_varImp_bin")))
st.write("")
fm_rf_figs5_col1, fm_rf_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_rf_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["RF fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Random Forest"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_rf_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["RF ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["RF ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Random Forest"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Random Forest"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_rf_figs3_col1, fm_rf_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_rf = pd.DataFrame(columns = [pd_var])
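# partial dependence results are assumed to be stored as (averaged predictions, grid values): [1][0] = grid for this feature, [0][0] = averaged prediction (older scikit-learn tuple return)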
pd_data_rf[pd_var] = model_full_results["RF partial dependence"][pd_var][1][0]
pd_data_rf["Partial dependence"] = model_full_results["RF partial dependence"][pd_var][0][0]
pd_chart_rf = alt.Chart(pd_data_rf, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_rf = pd.DataFrame(columns = [pd_var])
pd_data_ticks_rf[pd_var] = df[pd_var]
pd_data_ticks_rf["y"] = [model_full_results["RF partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_rf = alt.Chart(pd_data_ticks_rf, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_rf[pd_var].min(), pd_data_ticks_rf[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["RF partial dependence min/max"]["min"].min(), model_full_results["RF partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_rf_figs3_col1:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_rf_figs3_col2:
st.altair_chart(pd_ticks_rf + pd_chart_rf, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_RF_partDep_bin")))
# Download link for RF output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["RF information"].to_excel(excel_file, sheet_name="regression_information")
rf_error_est.to_excel(excel_file, sheet_name="regression_statistics")
rf_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
rf_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "RF full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Random Forest full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
fm_brt_reg_col1, fm_brt_reg_col2 = st.beta_columns(2)
# Regression information
with fm_brt_reg_col1:
st.write("Regression information:")
st.table(model_full_results["BRT information"].style.set_precision(user_precision))
# Regression statistics
with fm_brt_reg_col2:
st.write("Regression statistics:")
brt_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS"], columns = ["Value"])
brt_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Boosted Regression Trees"]
brt_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Boosted Regression Trees"]
brt_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Boosted Regression Trees"]
brt_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Boosted Regression Trees"]
st.table(brt_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_regStat_bin")))
st.write("")
# Training score (deviance vs. number of trees)
st.write("Training score:")
train_score = pd.DataFrame(index = range(model_full_results["BRT train score"].shape[0]), columns = ["Training deviance"])
train_score["Training deviance"] = model_full_results["BRT train score"]
train_score["Trees"] = train_score.index+1
train_score_plot = alt.Chart(train_score, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Trees", title = "trees", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [train_score["Trees"].min(), train_score["Trees"].max()])),
y = alt.Y("Training deviance", title = "training deviance", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Training deviance", "Trees"]
)
st.altair_chart(train_score_plot, use_container_width = True)
st.write("")
fm_brt_figs1_col1, fm_brt_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_brt_figs1_col1:
st.write("Variable importance (via permutation):")
brt_varImp_table = model_full_results["BRT variable importance"]
st.table(brt_varImp_table.style.set_precision(user_precision))
st.write("")
with fm_brt_figs1_col2:
st.write("")
st.write("")
st.write("")
brt_varImp_plot_data = model_full_results["BRT variable importance"]
brt_varImp_plot_data["Variable"] = brt_varImp_plot_data.index
brt_varImp = alt.Chart(brt_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(brt_varImp, use_container_width = True)
st.write("")
fm_brt_figs2_col1, fm_brt_figs2_col2 = st.beta_columns(2)
# Feature importance
with fm_brt_figs2_col1:
st.write("Feature importance (impurity-based):")
brt_featImp_table = model_full_results["BRT feature importance"]
st.table(brt_featImp_table.style.set_precision(user_precision))
with fm_brt_figs2_col2:
st.write("")
st.write("")
st.write("")
brt_featImp_plot_data = model_full_results["BRT feature importance"]
brt_featImp_plot_data["Variable"] = brt_featImp_plot_data.index
brt_featImp = alt.Chart(brt_featImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("Value", title = "feature importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "Value"]
)
st.altair_chart(brt_featImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_varImp_bin")))
st.write("")
fm_brt_figs5_col1, fm_brt_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_brt_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["BRT fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Boosted Regression Trees"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_brt_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["BRT ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["BRT ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Boosted Regression Trees"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Boosted Regression Trees"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_brt_figs3_col1, fm_brt_figs3_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_brt = pd.DataFrame(columns = [pd_var])
pd_data_brt[pd_var] = model_full_results["BRT partial dependence"][pd_var][1][0]
pd_data_brt["Partial dependence"] = model_full_results["BRT partial dependence"][pd_var][0][0]
pd_chart_brt = alt.Chart(pd_data_brt, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_brt = pd.DataFrame(columns = [pd_var])
pd_data_ticks_brt[pd_var] = df[pd_var]
pd_data_ticks_brt["y"] = [model_full_results["BRT partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_brt = alt.Chart(pd_data_ticks_brt, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_brt[pd_var].min(), pd_data_ticks_brt[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["BRT partial dependence min/max"]["min"].min(), model_full_results["BRT partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_brt_figs3_col1:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_brt_figs3_col2:
st.altair_chart(pd_ticks_brt + pd_chart_brt, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_BRT_partDep_bin")))
# Download link for BRT output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["BRT information"].to_excel(excel_file, sheet_name="regression_information")
brt_error_est.to_excel(excel_file, sheet_name="regression_statistics")
brt_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
brt_featImp_table.to_excel(excel_file, sheet_name="feature_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "BRT full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Boosted Regression Trees full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
fm_ann_reg_col1, fm_ann_reg_col2 = st.beta_columns(2)
# Regression information
with fm_ann_reg_col1:
st.write("Regression information:")
st.table(model_full_results["ANN information"].style.set_precision(user_precision))
# Regression statistics
with fm_ann_reg_col2:
st.write("Regression statistics:")
ann_error_est = pd.DataFrame(index = ["AUC ROC", "AP", "AUC PRC", "LOG-LOSS", "Best loss"], columns = ["Value"])
ann_error_est.loc["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Artificial Neural Networks"]
ann_error_est.loc["AP"] = model_full_results["model comparison thresInd"].loc["AP"]["Artificial Neural Networks"]
ann_error_est.loc["AUC PRC"] = model_full_results["model comparison thresInd"].loc["AUC PRC"]["Artificial Neural Networks"]
ann_error_est.loc["LOG-LOSS"] = model_full_results["model comparison thresInd"].loc["LOG-LOSS"]["Artificial Neural Networks"]
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
ann_error_est.loc["Best loss"] = model_full_results["ANN loss"]
st.table(ann_error_est.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_regStat_bin")))
st.write("")
# Loss curve (loss vs. number of iterations (epochs))
if ann_finalPara["weight optimization solver"][0] != "lbfgs":
st.write("Loss curve:")
loss_curve = pd.DataFrame(index = range(len(model_full_results["ANN loss curve"])), columns = ["Loss"])
loss_curve["Loss"] = model_full_results["ANN loss curve"]
loss_curve["Iterations"] = loss_curve.index+1
loss_curve_plot = alt.Chart(loss_curve, height = 200).mark_line(color = "darkred").encode(
x = alt.X("Iterations", title = "iterations (epochs)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [loss_curve["Iterations"].min(), loss_curve["Iterations"].max()])),
y = alt.Y("Loss", title = "loss", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Loss", "Iterations"]
)
st.altair_chart(loss_curve_plot, use_container_width = True)
st.write("")
fm_ann_figs1_col1, fm_ann_figs1_col2 = st.beta_columns(2)
# Variable importance (via permutation)
with fm_ann_figs1_col1:
st.write("Variable importance (via permutation):")
ann_varImp_table = model_full_results["ANN variable importance"]
st.table(ann_varImp_table.style.set_precision(user_precision))
with fm_ann_figs1_col2:
st.write("")
st.write("")
st.write("")
ann_varImp_plot_data = model_full_results["ANN variable importance"]
ann_varImp_plot_data["Variable"] = ann_varImp_plot_data.index
ann_varImp = alt.Chart(ann_varImp_plot_data, height = 200).mark_bar().encode(
x = alt.X("mean", title = "variable importance", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Variable", title = None, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), sort = None),
tooltip = ["Variable", "mean"]
)
st.altair_chart(ann_varImp, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_varImp_bin")))
st.write("")
fm_ann_figs5_col1, fm_ann_figs5_col2 = st.beta_columns(2)
# Observed vs. Probability of Occurrence
with fm_ann_figs5_col1:
st.write("Observed vs. Probability of Occurrence:")
prob_data = pd.DataFrame(model_full_results["ANN fitted"])
prob_data["Observed"] = df[response_var]
prob_data["ProbabilityOfOccurrence"] = prob_data[1]
prob_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Artificial Neural Networks"]
prob_data_plot = alt.Chart(prob_data, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X("ProbabilityOfOccurrence", title = "probability of occurrence", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Observed", title = "observed", scale = alt.Scale(domain = [min(prob_data["Observed"]), max(prob_data["Observed"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Observed", "ProbabilityOfOccurrence", "Threshold"]
)
thres = alt.Chart(prob_data, height = 200).mark_rule(size = 1.5, color = "darkred").encode(x = "Threshold", tooltip = ["Threshold"])
prob_plot = prob_data_plot + thres
st.altair_chart(prob_plot, use_container_width = True)
# ROC curve
with fm_ann_figs5_col2:
st.write("ROC curve:")
AUC_ROC_data = pd.DataFrame()
AUC_ROC_data["FPR"] = model_full_results["ANN ROC curve"][0]
AUC_ROC_data["TPR"] = model_full_results["ANN ROC curve"][1]
AUC_ROC_data["AUC ROC"] = model_full_results["model comparison thresInd"].loc["AUC ROC"]["Artificial Neural Networks"]
AUC_ROC_data["Threshold"] = model_full_results["model comparison thres"].loc["threshold"]["Artificial Neural Networks"]
AUC_ROC_plot = alt.Chart(AUC_ROC_data, height = 200).mark_line().encode(
x = alt.X("FPR", title = "1 - specificity (FPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("TPR", title = "sensitivity (TPR)", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["TPR", "FPR", "AUC ROC"]
)
line = alt.Chart(
pd.DataFrame({"FPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])], "TPR": [min(AUC_ROC_data["FPR"]), max(AUC_ROC_data["FPR"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("FPR"),
alt.Y("TPR"),
)
st.altair_chart(AUC_ROC_plot + line, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_thresAUC")))
st.write("")
# Partial dependence plots
st.write("Partial dependence plots:")
fm_ann_figs2_col1, fm_ann_figs2_col2 = st.beta_columns(2)
for pd_var in expl_var:
pd_data_ann = pd.DataFrame(columns = [pd_var])
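# the stored grid values appear to be on the standardized scale used for the ANN inputs; convert them back to original units via x*std + mean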
pd_data_ann[pd_var] = (model_full_results["ANN partial dependence"][pd_var][1][0]*(df[pd_var].std()))+df[pd_var].mean()
pd_data_ann["Partial dependence"] = model_full_results["ANN partial dependence"][pd_var][0][0]
pd_chart_ann = alt.Chart(pd_data_ann, height = 200).mark_line(color = "darkred").encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("Partial dependence", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["Partial dependence"] + [pd_var]
)
pd_data_ticks_ann = pd.DataFrame(columns = [pd_var])
pd_data_ticks_ann[pd_var] = df[pd_var]
pd_data_ticks_ann["y"] = [model_full_results["ANN partial dependence min/max"]["min"].min()] * df.shape[0]
pd_ticks_ann = alt.Chart(pd_data_ticks_ann, height = 200).mark_tick(size = 5, thickness = 1).encode(
x = alt.X(pd_var, axis = alt.Axis(titleFontSize = 12, labelFontSize = 11), scale = alt.Scale(domain = [pd_data_ticks_ann[pd_var].min(), pd_data_ticks_ann[pd_var].max()])),
y = alt.Y("y", title = "partial dependence", scale = alt.Scale(domain = [model_full_results["ANN partial dependence min/max"]["min"].min(), model_full_results["ANN partial dependence min/max"]["max"].max()]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [pd_var]
)
if expl_var.index(pd_var)%2 == 0:
with fm_ann_figs2_col1:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if expl_var.index(pd_var)%2 == 1:
with fm_ann_figs2_col2:
st.altair_chart(pd_ticks_ann + pd_chart_ann, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_ANN_partDep_bin")))
# Download link for ANN output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["ANN information"].to_excel(excel_file, sheet_name="regression_information")
ann_error_est.to_excel(excel_file, sheet_name="regression_statistics")
ann_varImp_table.to_excel(excel_file, sheet_name="variable_importance")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANN full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Artificial Neural Networks full model output</a>
""",
unsafe_allow_html=True)
st.write("")
# Performance metrics across all models
if any(a for a in sb_ML_alg if a == "Logistic Regression" or a == "Random Forest" or a == "Generalized Additive Models" or a == "Boosted Regression Trees" or a == "Artificial Neural Networks"):
st.markdown("**Model comparison**")
st.write("Threshold-independent metrics:")
st.write((model_full_results["model comparison thresInd"]).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompThresInd")))
st.write("")
st.write("Thresholds:")
st.table(model_full_results["model comparison thres"].transpose().style.set_precision(user_precision))
st.write("")
st.write("Threshold-dependent metrics:")
st.write((model_full_results["model comparison thresDep"]).transpose().style.set_precision(user_precision))
if len(sb_ML_alg) > 1:
if sett_hints:
st.info(str(fc.learning_hints("mod_md_modCompThresDep")))
st.write("")
# Download link for model comparison output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_full_results["model comparison thresInd"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics")
model_full_results["model comparison thres"].to_excel(excel_file, sheet_name="thresholds")
model_full_results["model comparison thresDep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Model comparison full model output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download model comparison output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# FULL MODEL PREDICTIONS
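# (fitted values for the original data and, if do_modprednew == "Yes", predictions for newly provided data, per algorithm)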
prediction_output = st.beta_expander("Full model predictions", expanded = False)
with prediction_output:
if model_full_results is not None:
#-------------------------------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
MLR_pred_orig = pd.DataFrame(columns = [response_var])
MLR_pred_orig[response_var] = model_full_results["MLR fitted"]
st.write(MLR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
MLR_pred_new = pd.DataFrame(columns = [response_var])
MLR_pred_new[response_var] = model_full_results["MLR prediction"]
st.write(MLR_pred_new.style.set_precision(user_precision))
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
GAM_pred_orig = pd.DataFrame(columns = [response_var])
GAM_pred_orig[response_var] = model_full_results["GAM fitted"]
st.write(GAM_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
GAM_pred_new = pd.DataFrame(columns = [response_var])
GAM_pred_new[response_var] = model_full_results["GAM prediction"]
st.write(GAM_pred_new.style.set_precision(user_precision))
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
RF_pred_orig = pd.DataFrame(columns = [response_var])
RF_pred_orig[response_var] = model_full_results["RF fitted"]
st.write(RF_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
RF_pred_new = pd.DataFrame(columns = [response_var])
RF_pred_new[response_var] = model_full_results["RF prediction"]
st.write(RF_pred_new.style.set_precision(user_precision))
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
BRT_pred_orig = pd.DataFrame(columns = [response_var])
BRT_pred_orig[response_var] = model_full_results["BRT fitted"]
st.write(BRT_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
BRT_pred_new = pd.DataFrame(columns = [response_var])
BRT_pred_new[response_var] = model_full_results["BRT prediction"]
st.write(BRT_pred_new.style.set_precision(user_precision))
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
ANN_pred_orig = pd.DataFrame(columns = [response_var])
ANN_pred_orig[response_var] = model_full_results["ANN fitted"]
st.write(ANN_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
ANN_pred_new = pd.DataFrame(columns = [response_var])
ANN_pred_new[response_var] = model_full_results["ANN prediction"]
st.write(ANN_pred_new.style.set_precision(user_precision))
#-------------------------------------------------------------
# Binary response variable
if response_var_type == "binary":
# MLR specific output
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
st.markdown("**Multiple Linear Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
MLR_pred_orig = pd.DataFrame(columns = [response_var])
MLR_pred_orig[response_var] = model_full_results["MLR fitted"]
st.write(MLR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
MLR_pred_new = pd.DataFrame(columns = [response_var])
MLR_pred_new[response_var] = model_full_results["MLR prediction"]
st.write(MLR_pred_new.style.set_precision(user_precision))
st.write("")
# LR specific output
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
st.markdown("**Logistic Regression**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
LR_pred_orig = pd.DataFrame(columns = [response_var])
LR_pred_orig[response_var] = model_full_results["LR fitted"][:, 1]
LR_pred_orig[response_var + "_binary"] = model_full_results["LR fitted binary"]
st.write(LR_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
LR_pred_new = pd.DataFrame(columns = [response_var])
LR_pred_new[response_var] = model_full_results["LR prediction"][:, 1]
LR_pred_new[response_var + "_binary"] = model_full_results["LR prediction binary"]
st.write(LR_pred_new.style.set_precision(user_precision))
st.write("")
# GAM specific output
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
st.markdown("**Generalized Additive Models**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
GAM_pred_orig = pd.DataFrame(columns = [response_var])
GAM_pred_orig[response_var] = model_full_results["GAM fitted"]
GAM_pred_orig[response_var + "_binary"] = model_full_results["GAM fitted binary"]
st.write(GAM_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
GAM_pred_new = pd.DataFrame(columns = [response_var])
GAM_pred_new[response_var] = model_full_results["GAM prediction"]
GAM_pred_new[response_var + "_binary"] = model_full_results["GAM prediction binary"]
st.write(GAM_pred_new.style.set_precision(user_precision))
st.write("")
# RF specific output
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
RF_pred_orig = pd.DataFrame(columns = [response_var])
RF_pred_orig[response_var] = model_full_results["RF fitted"][:, 1]
RF_pred_orig[response_var + "_binary"] = model_full_results["RF fitted binary"]
st.write(RF_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
RF_pred_new = pd.DataFrame(columns = [response_var])
RF_pred_new[response_var] = model_full_results["RF prediction"][:, 1]
RF_pred_new[response_var + "_binary"] = model_full_results["RF prediction binary"]
st.write(RF_pred_new.style.set_precision(user_precision))
st.write("")
# BRT specific output
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
BRT_pred_orig = pd.DataFrame(columns = [response_var])
BRT_pred_orig[response_var] = model_full_results["BRT fitted"][:, 1]
BRT_pred_orig[response_var + "_binary"] = model_full_results["BRT fitted binary"]
st.write(BRT_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
BRT_pred_new = pd.DataFrame(columns = [response_var])
BRT_pred_new[response_var] = model_full_results["BRT prediction"][:, 1]
BRT_pred_new[response_var + "_binary"] = model_full_results["BRT prediction binary"]
st.write(BRT_pred_new.style.set_precision(user_precision))
st.write("")
# ANN specific output
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
pred_col1, pred_col2 = st.beta_columns(2)
with pred_col1:
st.write("Predictions for original data:")
ANN_pred_orig = pd.DataFrame(columns = [response_var])
ANN_pred_orig[response_var] = model_full_results["ANN fitted"][:, 1]
ANN_pred_orig[response_var + "_binary"] = model_full_results["ANN fitted binary"]
st.write(ANN_pred_orig.style.set_precision(user_precision))
with pred_col2:
if do_modprednew == "Yes":
st.write("Predictions for new data:")
ANN_pred_new = pd.DataFrame(columns = [response_var])
ANN_pred_new[response_var] = model_full_results["ANN prediction"][:, 1]
ANN_pred_new[response_var + "_binary"] = model_full_results["ANN prediction binary"]
st.write(ANN_pred_new.style.set_precision(user_precision))
st.write("")
#-------------------------------------------------------------
st.write("")
# Download links for prediction data
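                    # The workbook below is built in memory with BytesIO, base64-encoded and embedded
                    # in a data URL, so the browser downloads it without a file being written on the server.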
output = BytesIO()
predictions_excel = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Multiple Linear Regression"):
MLR_pred_orig.to_excel(predictions_excel, sheet_name="MLR_pred_orig")
if do_modprednew == "Yes":
MLR_pred_new.to_excel(predictions_excel, sheet_name="MLR_pred_new")
if any(a for a in sb_ML_alg if a == "Logistic Regression"):
LR_pred_orig.to_excel(predictions_excel, sheet_name="LR_pred_orig")
if do_modprednew == "Yes":
LR_pred_new.to_excel(predictions_excel, sheet_name="LR_pred_new")
if any(a for a in sb_ML_alg if a == "Generalized Additive Models"):
GAM_pred_orig.to_excel(predictions_excel, sheet_name="GAM_pred_orig")
if do_modprednew == "Yes":
GAM_pred_new.to_excel(predictions_excel, sheet_name="GAM_pred_new")
if any(a for a in sb_ML_alg if a == "Random Forest"):
RF_pred_orig.to_excel(predictions_excel, sheet_name="RF_pred_orig")
if do_modprednew == "Yes":
RF_pred_new.to_excel(predictions_excel, sheet_name="RF_pred_new")
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
BRT_pred_orig.to_excel(predictions_excel, sheet_name="BRT_pred_orig")
if do_modprednew == "Yes":
BRT_pred_new.to_excel(predictions_excel, sheet_name="BRT_pred_new")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ANN_pred_orig.to_excel(predictions_excel, sheet_name="ANN_pred_orig")
if do_modprednew == "Yes":
ANN_pred_new.to_excel(predictions_excel, sheet_name="ANN_pred_new")
predictions_excel.save()
predictions_excel = output.getvalue()
b64 = base64.b64encode(predictions_excel)
dl_file_name = "Full model predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/predictions_excel;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download full model predictions</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# VALIDATION OUTPUT
if do_modval == "Yes":
val_output = st.beta_expander("Validation output", expanded = False)
with val_output:
if model_val_results is not None:
#------------------------------------
# Continuous response variable
if response_var_type == "continuous":
# Metrics means
st.write("Means of metrics across validation runs:")
st.write(model_val_results["mean"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means")))
# Metrics sd
st.write("SDs of metrics across validation runs:")
st.write(model_val_results["sd"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds")))
st.write("")
st.write("")
val_col1, val_col2 = st.beta_columns(2)
with val_col1:
# Residuals boxplot
if model_val_results["residuals"] is not None:
st.write("Boxplot of residuals across validation runs:")
residual_results = model_val_results["residuals"]
residuals_bplot = pd.melt(residual_results, ignore_index = False, var_name = "Algorithm", value_name = "Residuals")
residuals_boxchart = alt.Chart(residuals_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Residuals", title = "residuals", scale = alt.Scale(zero = False)),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
residuals_plot = residuals_boxchart #+ residuals_scatter
st.altair_chart(residuals_plot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_resBoxplot")))
with val_col2:
# Variance explained boxplot
if model_val_results["variance explained"] is not None:
st.write("Boxplot of % VE across validation runs:")
ve_results = model_val_results["variance explained"]
ve_bplot = pd.melt(ve_results, ignore_index = False, var_name = "Algorithm", value_name = "% VE")
ve_boxchart = alt.Chart(ve_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("% VE", scale = alt.Scale(domain = [min(ve_bplot["% VE"]), max(ve_bplot["% VE"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(ve_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_VEBoxplot")))
st.write("")
# Variable importance (via permutation)
st.write("Means of variable importances:")
varImp_table_mean = model_val_results["variable importance mean"]
st.write(varImp_table_mean.transpose().style.set_precision(user_precision))
st.write("SDs of variable importances:")
varImp_table_sd = model_val_results["variable importance sd"]
st.write(varImp_table_sd.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_varImp")))
st.write("")
st.write("")
# Residuals
if model_val_results["residuals"] is not None:
model_val_res = pd.DataFrame(index = ["min", "25%-Q", "median", "75%-Q", "max"], columns = sb_ML_alg)
for m in sb_ML_alg:
model_val_res.loc["min"][m] = model_val_results["residuals"][m].min()
model_val_res.loc["25%-Q"][m] = model_val_results["residuals"][m].quantile(q = 0.25)
model_val_res.loc["median"][m] = model_val_results["residuals"][m].quantile(q = 0.5)
model_val_res.loc["75%-Q"][m] = model_val_results["residuals"][m].quantile(q = 0.75)
model_val_res.loc["max"][m] = model_val_results["residuals"][m].max()
st.write("Residuals distribution across all validation runs:")
st.write(model_val_res.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_res")))
st.write("")
# Download link for validation output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_val_results["mean"].transpose().to_excel(excel_file, sheet_name="performance_metrics_mean")
model_val_results["sd"].transpose().to_excel(excel_file, sheet_name="performance_metrics_sd")
varImp_table_mean.to_excel(excel_file, sheet_name="variable_importance_mean")
varImp_table_sd.to_excel(excel_file, sheet_name="variable_importance_sd")
model_val_res.transpose().to_excel(excel_file, sheet_name="residuals_distribution")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Validation output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download validation output</a>
""",
unsafe_allow_html=True)
st.write("")
#------------------------------------
# Binary response variable
if response_var_type == "binary":
if model_val_results["mean_ind"].empty:
st.warning("Please select an additional algorithm besides Multiple Linear Regression!")
# Metrics (independent)
if model_val_results["mean_ind"].empty:
st.write("")
else:
st.write("Means of threshold-independent metrics across validation runs:")
st.write(model_val_results["mean_ind"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means_thresInd")))
# Metrics (independent)
if model_val_results["sd_ind"].empty:
st.write("")
else:
st.write("SDs of threshold-independent metrics across validation runs:")
st.write(model_val_results["sd_ind"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds_thresInd")))
st.write("")
st.write("")
val_col1, val_col2 = st.beta_columns(2)
with val_col1:
# AUC ROC boxplot
if model_val_results["AUC ROC"].empty:
st.write("")
else:
st.write("Boxplot of AUC ROC across validation runs:")
auc_results = model_val_results["AUC ROC"]
auc_bplot = pd.melt(auc_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
auc_boxchart = alt.Chart(auc_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "AUC ROC", scale = alt.Scale(domain = [min(auc_bplot["Value"]), max(auc_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(auc_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_AUCBoxplot")))
with val_col2:
# TSS boxplot
if model_val_results["TSS"].empty:
st.write("")
else:
st.write("Boxplot of TSS across validation runs:")
tss_results = model_val_results["TSS"]
tss_bplot = pd.melt(tss_results, ignore_index = False, var_name = "Algorithm", value_name = "Value")
tss_boxchart = alt.Chart(tss_bplot, height = 200).mark_boxplot(color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X("Value", title = "TSS", scale = alt.Scale(domain = [min(tss_bplot["Value"]), max(tss_bplot["Value"])])),
y = alt.Y("Algorithm", title = None),
color = alt.Color("Algorithm", legend = None)
).configure_axis(
labelFontSize = 12,
titleFontSize = 12
)
st.altair_chart(tss_boxchart, use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_TSSBoxplot")))
st.write("")
# Variable importance
st.write("Means of variable importances:")
varImp_table_mean = model_val_results["variable importance mean"]
st.write(varImp_table_mean.style.set_precision(user_precision))
st.write("SDs of variable importances:")
varImp_table_sd = model_val_results["variable importance sd"]
st.write(varImp_table_sd.style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_varImp_bin")))
st.write("")
st.write("")
# Metrics (dependent)
if model_val_results["mean_dep"].empty:
st.write("")
else:
st.write("Means of threshold-dependent metrics across validation runs:")
st.write(model_val_results["mean_dep"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_means_thresDep")))
# Metrics (dependent)
if model_val_results["sd_dep"].empty:
st.write("")
else:
st.write("SDs of threshold-dependent metrics across validation runs:")
st.write(model_val_results["sd_dep"].transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_val_sds_thresDep")))
st.write("")
# Download link for validation output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
model_val_results["mean_ind"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics_mean")
model_val_results["sd_ind"].transpose().to_excel(excel_file, sheet_name="thresh_independent_metrics_sd")
varImp_table_mean.to_excel(excel_file, sheet_name="variable_importance_mean")
varImp_table_sd.to_excel(excel_file, sheet_name="variable_importance_sd")
model_val_results["mean_dep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics_mean")
model_val_results["sd_dep"].transpose().to_excel(excel_file, sheet_name="thresh_dependent_metrics_sd")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Validation output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download validation output</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.warning("Please run models!")
st.write("")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER-TUNING OUTPUT
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Boosted Regression Trees") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
if do_hypTune == "Yes":
hype_title = "Hyperparameter-tuning output"
if do_hypTune != "Yes":
hype_title = "Hyperparameter output"
hype_output = st.beta_expander(hype_title, expanded = False)
with hype_output:
# Random Forest
if any(a for a in sb_ML_alg if a == "Random Forest"):
st.markdown("**Random Forest**")
# Final hyperparameters
if rf_finalPara is not None:
st.write("Final hyperparameters:")
st.table(rf_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if rf_tuning_results is not None and rf_finalPara is not None:
st.write("Tuning details:")
rf_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
rf_finalTuneMetrics["scoring metric"] = [rf_tuning_results.loc["value"]["scoring"]]
rf_finalTuneMetrics["number of models"] = [rf_tuning_results.loc["value"]["number of models"]]
rf_finalTuneMetrics["mean cv score"] = [rf_tuning_results.loc["value"]["mean score"]]
rf_finalTuneMetrics["standard deviation cv score"] = [rf_tuning_results.loc["value"]["std score"]]
rf_finalTuneMetrics["test data score"] = [rf_tuning_results.loc["value"]["test score"]]
st.table(rf_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_RF_details")))
st.write("")
# Boosted Regression Trees
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
st.markdown("**Boosted Regression Trees**")
# Final hyperparameters
if brt_finalPara is not None:
st.write("Final hyperparameters:")
st.table(brt_finalPara.transpose())
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if brt_tuning_results is not None and brt_finalPara is not None:
st.write("Tuning details:")
brt_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
brt_finalTuneMetrics["scoring metric"] = [brt_tuning_results.loc["value"]["scoring"]]
brt_finalTuneMetrics["number of models"] = [brt_tuning_results.loc["value"]["number of models"]]
brt_finalTuneMetrics["mean cv score"] = [brt_tuning_results.loc["value"]["mean score"]]
brt_finalTuneMetrics["standard deviation cv score"] = [brt_tuning_results.loc["value"]["std score"]]
brt_finalTuneMetrics["test data score"] = [brt_tuning_results.loc["value"]["test score"]]
st.table(brt_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_BRT_details")))
st.write("")
# Artificial Neural Networks
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
st.markdown("**Artificial Neural Networks**")
# Final hyperparameters
if ann_finalPara is not None:
st.write("Final hyperparameters:")
st.table(ann_finalPara.transpose().style.format({"L² regularization": "{:.5}"}))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_finPara")))
st.write("")
else:
st.warning("Please run models!")
# Tuning details
if do_hypTune == "Yes":
if ann_tuning_results is not None and ann_finalPara is not None:
st.write("Tuning details:")
ann_finalTuneMetrics = pd.DataFrame(index = ["value"], columns = ["scoring metric", "number of models", "mean cv score", "standard deviation cv score", "test data score"])
ann_finalTuneMetrics["scoring metric"] = [ann_tuning_results.loc["value"]["scoring"]]
ann_finalTuneMetrics["number of models"] = [ann_tuning_results.loc["value"]["number of models"]]
ann_finalTuneMetrics["mean cv score"] = [ann_tuning_results.loc["value"]["mean score"]]
ann_finalTuneMetrics["standard deviation cv score"] = [ann_tuning_results.loc["value"]["std score"]]
ann_finalTuneMetrics["test data score"] = [ann_tuning_results.loc["value"]["test score"]]
st.table(ann_finalTuneMetrics.transpose().style.set_precision(user_precision))
if sett_hints:
st.info(str(fc.learning_hints("mod_md_hypeTune_ANN_details")))
st.write("")
# Download link for hyperparameter output
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara.to_excel(excel_file, sheet_name="RF_final_hyperparameters")
if do_hypTune == "Yes":
rf_finalTuneMetrics.to_excel(excel_file, sheet_name="RF_tuning_details")
if any(a for a in sb_ML_alg if a == "Boosted Regression Trees"):
brt_finalPara.to_excel(excel_file, sheet_name="BRT_final_hyperparameters")
if do_hypTune == "Yes":
brt_finalTuneMetrics.to_excel(excel_file, sheet_name="BRT_tuning_details")
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara.to_excel(excel_file, sheet_name="ANN_final_hyperparameters")
if do_hypTune == "Yes":
ann_finalTuneMetrics.to_excel(excel_file, sheet_name="ANN_tuning_details")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
if do_hypTune == "Yes":
dl_file_name = "Hyperparameter-tuning output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter-tuning output</a>
""",
unsafe_allow_html=True)
if do_hypTune != "Yes":
dl_file_name = "Hyperparameter output__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download hyperparameter output</a>
""",
unsafe_allow_html=True)
st.write("")
#------------------------------------------------------------------------------------------
# MULTI-CLASS CLASSIFICATION
if analysis_type == "Multi-class classification":
#++++++++++++++++++++++++++++++++++++++++++++
# MACHINE LEARNING (PREDICTIVE DATA ANALYSIS)
st.write("")
st.write("")
data_machinelearning_container2 = st.beta_container()
with data_machinelearning_container2:
st.header("**Multi-class classification**")
st.markdown("Go for creating predictive models of your data using machine learning techniques! STATY will take care of the modelling for you, so you can put your focus on results interpretation and communication! ")
ml_settings = st.beta_expander("Specify models ", expanded = False)
with ml_settings:
# Initial status for running models (same as for regression, bc same functions are used)
run_models = False
sb_ML_alg = "NA"
do_hypTune = "No"
do_modval = "No"
do_hypTune_no = "No hyperparameter tuning"
final_hyPara_values="None"
model_val_results = None
model_full_results = None
gam_finalPara = None
brt_finalPara = None
brt_tuning_results = None
rf_finalPara = None
rf_tuning_results = None
ann_finalPara = None
ann_tuning_results = None
MLR_intercept = None
MLR_cov_type = None
MLR_finalPara = None
MLR_model = "OLS"
LR_cov_type = None
LR_finalPara = None
if df.shape[1] > 0 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_type = "multi-class"
response_var_options = df.columns
response_var = st.selectbox("Select response variable", response_var_options, key = session_state.id)
# Check how many classes the response variable has (max: 10 classes)
if len(pd.unique(df[response_var])) > 10:
st.error("ERROR: Your response variable has more than 10 classes. Please select a variable with less classes!")
return
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please select a numeric multi-class response variable!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric multi-class response variable!"
elif var_cat.loc[response_var] == "numeric" and df[response_var].dtypes == "float64":
response_var_message_num = "ERROR: Please select a multi-class response variable!"
elif var_cat.loc[response_var] == "binary":
response_var_message_num = "ERROR: Please select a multi-class response variable!"
elif var_cat.loc[response_var] == "numeric" and df[response_var].dtypes == "int64":
response_var_message_cat = "WARNING: Please check whether your response variable is indeed a multi-class variable!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = df.columns
expl_var_options = expl_var_options[expl_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = session_state.id)
var_list = list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
                        if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithms**")
# Select algorithms
algorithms = ["Random Forest", "Artificial Neural Networks"]
alg_list = list(algorithms)
sb_ML_alg = st.multiselect("Select modelling techniques", alg_list, alg_list)
st.markdown("**Model-specific settings**")
# Logistic Regression settings
# if any(a for a in sb_ML_alg if a == "Logistic Regression"):
# LR_finalPara = pd.DataFrame(index = ["value"], columns = ["intercept", "covType"])
# LR_intercept = "Yes"
# LR_cov_type = "non-robust"
# LR_finalPara["intercept"] = LR_intercept
# LR_finalPara["covType"] = LR_cov_type
# if st.checkbox("Adjust settings for Logistic Regression"):
# col1, col2 = st.beta_columns(2)
# with col1:
# LR_intercept = st.selectbox("Include intercept ", ["Yes", "No"])
# with col2:
# LR_cov_type = st.selectbox("Covariance type", ["non-robust", "HC0"])
# LR_finalPara["intercept"] = LR_intercept
# LR_finalPara["covType"] = LR_cov_type
# st.write("")
# Save hyperparameter values for machine learning methods
final_hyPara_values = {}
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_finalPara = pd.DataFrame(index = ["value"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_finalPara["number of trees"] = [100]
rf_finalPara["maximum tree depth"] = [None]
rf_finalPara["maximum number of features"] = [len(expl_var)]
rf_finalPara["sample rate"] = [0.99]
final_hyPara_values["rf"] = rf_finalPara
if st.checkbox("Adjust settings for Random Forest "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_finalPara["number of trees"] = st.number_input("Number of trees", value=100, step=1, min_value=1)
with col3:
rf_mtd_sel = st.selectbox("Specify maximum tree depth ", ["No", "Yes"])
if rf_mtd_sel == "No":
rf_finalPara["maximum tree depth"] = [None]
if rf_mtd_sel == "Yes":
rf_finalPara["maximum tree depth"] = st.slider("Maximum tree depth ", value=20, step=1, min_value=1, max_value=50)
if len(expl_var) >1:
with col4:
rf_finalPara["maximum number of features"] = st.slider("Maximum number of features ", value=len(expl_var), step=1, min_value=1, max_value=len(expl_var))
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
else:
with col2:
rf_finalPara["sample rate"] = st.slider("Sample rate ", value=0.99, step=0.01, min_value=0.5, max_value=0.99)
final_hyPara_values["rf"] = rf_finalPara
st.write("")
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_finalPara = pd.DataFrame(index = ["value"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "hidden layer sizes", "learning rate", "L² regularization"])
ann_finalPara["weight optimization solver"] = ["adam"]
ann_finalPara["maximum number of iterations"] = [200]
ann_finalPara["activation function"] = ["relu"]
ann_finalPara["hidden layer sizes"] = [(100,)]
ann_finalPara["learning rate"] = [0.001]
ann_finalPara["L² regularization"] = [0.0001]
final_hyPara_values["ann"] = ann_finalPara
if st.checkbox("Adjust settings for Artificial Neural Networks "):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
ann_finalPara["weight optimization solver"] = st.selectbox("Weight optimization solver ", ["adam"])
with col2:
ann_finalPara["activation function"] = st.selectbox("Activation function ", ["relu", "identity", "logistic", "tanh"])
with col3:
ann_finalPara["maximum number of iterations"] = st.slider("Maximum number of iterations ", value=200, step=1, min_value=10, max_value=1000)
with col4:
ann_finalPara["learning rate"] = st.slider("Learning rate ", min_value=0.0001, max_value=0.01, value=0.001, step=1e-4, format="%.4f")
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers", [1, 2, 3])
if number_hidden_layers == 1:
number_nodes1 = st.slider("Number of nodes in hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,)]
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,)]
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer", 5, 500, 100)
number_nodes2 = st.slider("Number of neurons in second hidden layer", 5, 500, 100)
number_nodes3 = st.slider("Number of neurons in third hidden layer", 5, 500, 100)
ann_finalPara["hidden layer sizes"] = [(number_nodes1,number_nodes2,number_nodes3,)]
with col6:
ann_finalPara["L² regularization"] = st.slider("L² regularization ", min_value=0.00001, max_value=0.001, value=0.0001, step=1e-5, format="%.5f")
#--------------------------------------------------------------------------------------
# HYPERPARAMETER TUNING SETTINGS
if len(sb_ML_alg) >= 1:
# Depending on algorithm selection different hyperparameter settings are shown
if any(a for a in sb_ML_alg if a == "Random Forest") or any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
# General settings
st.markdown("**Hyperparameter-tuning settings**")
do_hypTune = st.selectbox("Use hyperparameter-tuning", ["No", "Yes"])
# Save hyperparameter values for all algorithms
hyPara_values = {}
# No hyperparameter-tuning
if do_hypTune == "No":
do_hypTune_no = "Default hyperparameter values are used!"
# Hyperparameter-tuning
elif do_hypTune == "Yes":
st.warning("WARNING: Hyperparameter-tuning can take a lot of time! For tips, please [contact us](mailto:<EMAIL>?subject=Staty-App).")
# Further general settings
hypTune_method = st.selectbox("Hyperparameter-search method", ["random grid-search", "grid-search"])
col1, col2 = st.beta_columns(2)
with col1:
hypTune_nCV = st.slider("Select number for n-fold cross-validation", 2, 10, 5)
if hypTune_method == "random grid-search" or hypTune_method == "Bayes optimization" or hypTune_method == "sequential model-based optimization":
with col2:
hypTune_iter = st.slider("Select number of iterations for search", 20, 1000, 20)
else:
hypTune_iter = False
st.markdown("**Model-specific tuning settings**")
# Random Forest settings
if any(a for a in sb_ML_alg if a == "Random Forest"):
rf_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["number of trees", "maximum tree depth", "maximum number of features", "sample rate"])
rf_tunePara["number of trees"] = [50, 500]
rf_tunePara["maximum tree depth"] = [None, None]
rf_tunePara["maximum number of features"] = [1, len(expl_var)]
rf_tunePara["sample rate"] = [0.8, 0.99]
hyPara_values["rf"] = rf_tunePara
if st.checkbox("Adjust tuning settings for Random Forest"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
with col1:
rf_tunePara["number of trees"] = st.slider("Range for number of trees ", 50, 1000, [50, 500])
with col3:
rf_mtd_choice = st.selectbox("Specify maximum tree depth", ["No", "Yes"])
if rf_mtd_choice == "Yes":
rf_tunePara["maximum tree depth"] = st.slider("Range for maximum tree depth ", 1, 50, [2, 10])
else:
rf_tunePara["maximum tree depth"] = [None, None]
with col4:
if len(expl_var) > 1:
rf_tunePara["maximum number of features"] = st.slider("Range for maximum number of features", 1, len(expl_var), [1, len(expl_var)])
else:
rf_tunePara["maximum number of features"] = [1,1]
with col2:
rf_tunePara["sample rate"] = st.slider("Range for sample rate ", 0.5, 0.99, [0.8, 0.99])
hyPara_values["rf"] = rf_tunePara
# Artificial Neural Networks settings
if any(a for a in sb_ML_alg if a == "Artificial Neural Networks"):
ann_tunePara = pd.DataFrame(index = ["min", "max"], columns = ["weight optimization solver", "maximum number of iterations", "activation function", "number of hidden layers", "nodes per hidden layer", "learning rate","L² regularization"])# "learning rate schedule", "momentum", "epsilon"])
ann_tunePara["weight optimization solver"] = list([["adam"], "NA"])
ann_tunePara["maximum number of iterations"] = [100, 200]
ann_tunePara["activation function"] = list([["relu"], "NA"])
ann_tunePara["number of hidden layers"] = list([1, "NA"])
ann_tunePara["nodes per hidden layer"] = [50, 100]
ann_tunePara["learning rate"] = [0.0001, 0.002]
ann_tunePara["L² regularization"] = [0.00001, 0.0002]
hyPara_values["ann"] = ann_tunePara
if st.checkbox("Adjust tuning settings for Artificial Neural Networks"):
col1, col2 = st.beta_columns(2)
col3, col4 = st.beta_columns(2)
col5, col6 = st.beta_columns(2)
with col1:
weight_opt_list = st.selectbox("Weight optimization solver ", ["adam"])
if len(weight_opt_list) == 0:
weight_opt_list = ["adam"]
st.warning("WARNING: Default value used 'adam'")
ann_tunePara["weight optimization solver"] = list([[weight_opt_list], "NA"])
with col2:
ann_tunePara["maximum number of iterations"] = st.slider("Maximum number of iterations (epochs) ", 10, 1000, [100, 200])
with col3:
act_func_list = st.multiselect("Activation function ", ["identity", "logistic", "tanh", "relu"], ["relu"])
if len(act_func_list) == 0:
act_func_list = ["relu"]
st.warning("WARNING: Default value used 'relu'")
ann_tunePara["activation function"] = list([act_func_list, "NA"])
with col5:
number_hidden_layers = st.selectbox("Number of hidden layers ", [1, 2, 3])
ann_tunePara["number of hidden layers"] = list([number_hidden_layers, "NA"])
# Cases for hidden layers
if number_hidden_layers == 1:
ann_tunePara["nodes per hidden layer"] = st.slider("Number of nodes in hidden layer ", 5, 500, [50, 100])
if number_hidden_layers == 2:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
if number_hidden_layers == 3:
number_nodes1 = st.slider("Number of neurons in first hidden layer ", 5, 500, [50, 100])
number_nodes2 = st.slider("Number of neurons in second hidden layer ", 5, 500, [50, 100])
number_nodes3 = st.slider("Number of neurons in third hidden layer ", 5, 500, [50, 100])
min_nodes = list([number_nodes1[0], number_nodes2[0], number_nodes3[0]])
max_nodes = list([number_nodes1[1], number_nodes2[1], number_nodes3[1]])
ann_tunePara["nodes per hidden layer"] = list([min_nodes, max_nodes])
with col6:
if weight_opt_list == "adam":
ann_tunePara["learning rate"] = st.slider("Range for learning rate ", 0.0001, 0.01, [0.0001, 0.002], step=1e-4, format="%.4f")
with col4:
ann_tunePara["L² regularization"] = st.slider("L² regularization parameter ", 0.0, 0.001, [0.00001, 0.0002], step=1e-5, format="%.5f")
hyPara_values["ann"] = ann_tunePara
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.beta_columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
                                            df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
import pandas as pd
import sys
import time
import os
from data import get_ibge_code_list
from crawler_utils import get_city_beds
from crawler_utils import get_bed_codes
list_city = get_ibge_code_list()
df_beds = pd.DataFrame(columns=['codibge', 'Codigo', 'Descrição', 'Existente', 'Sus', 'Não Sus'])
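
# Hypothetical sketch of how df_beds might be filled (the real signatures of get_city_beds
# and get_bed_codes are defined in crawler_utils and may differ):
# for code in list_city:
#     city_beds = get_city_beds(code)   # assumed to return one DataFrame per city
#     city_beds['codibge'] = code
#     df_beds = pd.concat([df_beds, city_beds], ignore_index=True)
#     time.sleep(1)                     # throttle requests to the source site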
import requests
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Enforce incognito mode
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
from selenium.webdriver.common.keys import Keys
from webdriver_manager.firefox import GeckoDriverManager
import numpy as np
from numpy import array
import pandas as pd
import csv
from datetime import date, datetime
# PULLING BY SEASON
def nhl_pull(str_url):
driver.get(str_url); # get to the url
try: # Wait until the table appears - JavaScript table may appear slower than other page elements
element = WebDriverWait(driver, 50).until(
EC.presence_of_element_located((By.CLASS_NAME, "rt-table"))
)
finally:
None
time.sleep(2); #Just in case
# Pull from information
html = driver.page_source # Pull the script information
soup = BeautifulSoup(html) # Soupify
# Get table header
rtheader = soup.find_all("div", {"class": "rt-table"})
n_pagecount = int(soup.find_all("span", {"class": "-totalPages"})[0].text) - 1; # number of pages to scrape
    # NOTE: page numbers are zero-indexed, so n_pagecount is the index of the last page to request
    # NOTE: this header parsing threw an error when placed inside a separate function
tableheader = soup.find_all("div", {"class": "tableHeaderDiv"})[0].find_all("div", {"class": "rt-header-cell"})
str_titles = ["idx_row"]#['season start', 'season end']
for temp_str in tableheader:
temp_str_extract = temp_str.get('title');
        if temp_str_extract is not None:
            str_titles.append(temp_str_extract)
n_title = len(str_titles);
# Pulling the data.
table_data = soup.find_all("div", {"class": "rt-tbody"})[0].find_all("div", {"class" : "rt-tr-group"})
ary_data = [];
for idx_count, iter_row in enumerate(table_data):
each_row = iter_row.find_all("div", {"class" : "rt-td"})
temp_vec = [];
for iter_col in each_row:
temp_vec.append(iter_col.text) # save the data in order
if idx_count == 0: #start creating the array
ary_data = np.array(temp_vec)
        else: # Stack subsequent rows vertically
ary_data = np.vstack((ary_data, np.array(temp_vec)))
# Convert to data frame
# Note: converting to array just in case it becomes a one row list.
df_data = pd.DataFrame(np.reshape(ary_data, (-1, len(str_titles))), columns = str_titles)
# Pull total record count
n_recordcount = int(soup.find_all("span", {"class": "-totalInfo"})[0].text.split()[0]);
return {'df': df_data, 'n_pagecount': n_pagecount, 'n_title': n_title, "n_recordcount" : n_recordcount} # Return the dataframe of data & pagecount for multiple pages to pull
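
# Example usage (illustrative URL; assumes the global selenium `driver` has been created):
# pulled = nhl_pull("http://www.nhl.com/stats/teams?reportType=season&page=0&pageSize=100")
# pulled['df']            # DataFrame with one column per table header
# pulled['n_pagecount']   # index of the last page (total pages minus one)
# pulled['n_recordcount'] # total number of records reported by the page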
def strip_right(df, suffix):
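    # NOTE: Series.str.rstrip treats `suffix` as a set of characters to remove, not a literal
    # suffix, so e.g. strip_right(df, "_x") also strips a plain trailing "x" from column names.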
df.columns = df.columns.str.rstrip(suffix)
return df
# Pull URL of the team
def url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype):
URL_team_summary = (f"http://www.nhl.com/stats/"
f"{idx_data_type}?aggregate=0&{idx_report}reportType={idx_report_type}&"
f"{idx_datetype}From={iter_date_start}&{idx_datetype}To={iter_date_end}&"
f"gameType={str_gametype}&filter=gamesPlayed,gte,1&page={i_npage}&pageSize=100")
    # Note: an earlier version passed idx_aggregate == 'aggregate=0&' as a separate parameter;
    # it is hard-coded here because the workflow pulls the data per season.
return URL_team_summary
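
# For illustration, url_team_pull("teams", "", "season", "2018-10-03", "2019-04-06", 2, 0, "date")
# yields a URL of the form:
# http://www.nhl.com/stats/teams?aggregate=0&reportType=season&dateFrom=2018-10-03&dateTo=2019-04-06&gameType=2&filter=gamesPlayed,gte,1&page=0&pageSize=100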
def nhl_pull_loop(str_date_start, str_date_end, str_page, idx_data_type, idx_report_type, idx_datetype):
for idx, iter_date_start in enumerate(str_date_start):
iter_date_end = str_date_end[idx];
df_fin = [];
for idx_game, iter_game in enumerate(["regular", "playoff"]):
# In-loop-specific initial settings
str_gametype = idx_game + 2; # start with regular season
i_npage = 0; # start with page 1
idx_report = ''; # start with the summary page
# temporary data frame save
temp_df = [];
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary)
temp_df = temp_pulled['df']; # Initialize
npage = temp_pulled['n_pagecount'];
nrecord = temp_pulled['n_recordcount'];
if nrecord == 0:
                continue # no records for this game type - skip to the next one
else: # Continue pulling the data for having a record
# For more than one record
if npage != 0:
for i_npage in range(1, npage + 1): # Python range, need to add one.
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary)
temp_df = temp_df.append(temp_pulled['df']);
else:
None
# All summary data pulled, remove empty rows
temp_df = temp_df.loc[(temp_df.idx_row != '\xa0'),:];
# Summary stats, just to check the right count of data.
#temp_df.to_csv(f'df_{idx_data_type}_{idx_report_type}_{iter_season}_summaryOnly.csv',index = False)
# Pull other data - more specific statistics,
for temp_idx in str_page:
# Set specific parameters for different categories - pages
idx_report = "report=" + temp_idx + "&";
i_npage = 0; # start with page 1, Reset
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
# Pull date
temp_pulled = nhl_pull(URL_team_summary)
# Because this is different categories - neeed to make master partial file
temp_df_partial = temp_pulled['df'];
# Need to join the data frame
npage = temp_pulled['n_pagecount']
if npage != 0: # Pull data from multiple pages
for i_npage in range(1, npage + 1): # Python range, need to add one.
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary); # Pull additional data
temp_df_partial = temp_df_partial.append(temp_pulled['df']); # stack multiple pages
else:
None
# Save the data
# First, must clean up the empty rows, just to make sure not to join empty-empty
temp_df_partial = temp_df_partial.loc[(temp_df_partial.idx_row != '\xa0'),:];
if (temp_pulled['df'].size != 0): # If the page has at least one entry
if idx_data_type == 'teams': # For merging team statistics
if idx_report_type == 'season':
                        temp_df = pd.merge(temp_df, temp_df_partial, how = 'left', on = "Team", suffixes=('_x', '_y'))
#!/usr/bin/python3
import sys
sys.path.insert(0, "/home/eric/ramukcire/estimating_cost_of_dc_services/syscost/")
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import re
from collections import Counter
import itertools
import warnings
from termcolor import colored
import streamlit as st
from subprocess import check_output
import traffic.traffic as traffic
# from traffic.traffic import traffic
from datetime import datetime as dt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
print(colored('Imported Modules\n', 'yellow'))
print(colored('Running from '+str((os.getcwd())),'green'))
#print(colored('Other directories at this level are '+ str(os.listdir()),'red'))
st.title('Total Cost of Ownership Model (Hardy)')
st.subheader('<NAME>, Doctor of Design')
'''This script runs the Hardy model. For now it does not interact with the model \
directly; it consumes the outputs of the Perl program, parses them, and passes them \
to the SysCost EEIO inputs. \
'''
class costet(object):
def __init__(self, input_dc, input_r, streamlit=True, model_file=None):
'''Args: Runs the specified parameter TCO model.
input_dc: "input_example/dc.params" (Data-Center Parameters)
input_r: "input_example/r.params" (Resource Parameters)
streamlit = boolean for using streamlit
model_file = file name for the model output'''
self.input_dc = input_dc
self.input_r = input_r
self.model = check_output(["perl", "./cost-et.pl", input_dc, input_r], shell = False)
self.model = self.model.decode("utf-8")
self.streamlit = streamlit
self.model_file = model_file
def view_raw_output(self, save=None):
if self.streamlit is True:
st.header('Model run for ' +self.input_dc+' with '+self.input_r)
st.subheader('Output from Cost-ET Model run')
st.text(self.model)
if save is not None:
f = open(self.model_file, "w+")
f.write(str(self.model))
f.close()
print(colored('This is the output from the Cost-ET model: ' + self.model, 'yellow'))
def view_script(self, script):
'''Args: script: "cost-et.pl" '''
f = open(script, "r")
f = f.read()
print(colored('Print this :'+ f, 'magenta'))
if self.streamlit is True:
st.subheader('Print out of '+script)
st.code(f, language='perl')
def get_dc_params(self):
_df = pd.read_csv(self.model_file)[2:24].reset_index(drop=True)
_df.columns = ['DC_parameters']
_df[['DC Param','Value']] = _df['DC_parameters'].str.split("=",expand=True)
_df = _df[['DC Param','Value']]
if self.streamlit is True:
st.subheader('DC Parameters: ')
st.dataframe(_df, 500, 600)
return _df
def get_resource_params(self):
_df = pd.read_csv(self.model_file)[29:76].reset_index(drop=True)
_df.columns = ['Resource_parameters']
_df[['Resource','Value']] = _df['Resource_parameters'].str.split("=",expand=True)
_df = _df[['Resource','Value']]
if self.streamlit is True:
st.subheader('Resources Parameters: ')
st.dataframe(_df, 500, 600)
return _df
def get_server_age(self):
_df = pd.read_csv(self.model_file)[79:85].reset_index(drop=True)
_df.columns = ['Age Dist']
_df[['Age (Years)','Server Count']] = _df['Age Dist'].str.split(" ",expand=True)
_df = _df[['Age (Years)','Server Count']]
if self.streamlit is True:
st.subheader('Age: ')
st.dataframe(_df, 500, 1000)
return _df
def get_server_replacement(self):
'''Unclear what this calue means ATM.'''
_df = pd.read_csv(self.model_file)[85:86].reset_index(drop=True)
_df.columns = ['Server Replacements']
_df[['Count','Server Count']] = _df['Server Replacements'].str.split(" ",expand=True)
_df = _df[['Count']]
if self.streamlit is True:
st.subheader('Server Replacement: ')
st.dataframe(_df, 500, 1000)
return _df
def get_dc_costs(self):
# This requires that the model be run. To create a new model txt file.
_df = pd.read_csv(self.model_file)[90:96].reset_index(drop=True)
_df.columns = ['DC Costs']
_df[['Cost_Component', 'Cost', 'Unit', 'Relative']] = _df['DC Costs'].str.split(" ",expand=True)
_df = _df[['Cost_Component','Cost','Unit', 'Relative']].iloc[1:,:]
_df['Cost_Component'] = _df['Cost_Component'].str[:-1]
_df.set_index('Cost_Component', inplace = True)
#_df.index = _df['Cost_Component']
if self.streamlit is True:
st.subheader('Data Center Costs: ')
st.dataframe(_df, 500, 1000)
return _df
def get_dc_tco(self):
_df = pd.read_csv(self.model_file)[90:96].reset_index(drop=True)
_df.columns = ['DC Costs']
_df[['Cost_Component','Cost','Unit', 'Relative']] = _df['DC Costs'].str.split(" ",expand=True)
_df = _df[['Cost_Component','Cost','Unit']].iloc[:1,:]
_df['Cost_Component'] = _df['Cost_Component'].str[:-1]
# _df.set_index('Cost_Component', inplace = True)
if self.streamlit is True:
st.subheader('Data Center Total Cost of Ownership: ')
st.dataframe(_df, 500, 1000)
return _df
def plot_dc_costs(self, plot):
'''Create and Save figure. The figure is then loaded as an image into Streamlit.
Args: plot: the file to save name to save the figure as'''
_df = self.get_dc_costs()
plot_file = '../images/'+str(plot)+'.png'
_plt = _df[['Cost']].copy()
_plt.Cost = _plt.Cost.astype(float)
_plt.plot(kind='bar')
plt.savefig(plot_file)
if self.streamlit is True:
st.subheader('Plot of Data Center Costs: ')
st.image(plot_file)
print(_plt)
def res_dims(self):
'''Provides the units of input and outputs'''
_df = pd.read_csv(self.model_file)[96:102]
_df.columns = ['Resource Dims']
_df = _df['Resource Dims'].str.split(":",expand=True)
_df[1].str.lstrip(' ')
_df[1] = _df[1].replace('\t',' ', regex=True)
regex = _df[1].str.extract(r'(\d+\.*\d*)\s*(\w+\^*\d*)*', expand = True)
_df = pd.concat([_df, regex], axis =1)
_df.columns = ['parameter','mix_value', 'value','units']
_df.set_index('parameter', inplace = True)
_df = _df[['value', 'units']].fillna(' ')
if self.streamlit is True:
st.subheader('Resource Dimensions: ')
st.dataframe(_df, 500, 1000)
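
# Minimal usage sketch (parameter files follow the examples in the docstrings above;
# the model_file name is arbitrary):
# tco = costet("input_example/dc.params", "input_example/r.params",
#              streamlit=False, model_file="cost_et_output.txt")
# tco.view_raw_output(save=True)   # writes the raw Cost-ET output to model_file
# dc_costs = tco.get_dc_costs()    # parsed data-center cost components as a DataFrame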
class eeio(object):
def __init__(self, input_csv):
self.Introduction = pd.read_pickle('eeio_data/Introduction.pkl')
self.Environmental_inventory = pd.read_pickle('eeio_data/Environmental_inventory.pkl')
self.TRACIimpacts_of_selected_effect = pd.read_pickle('eeio_data/TRACIimpacts of selected effect.pkl')
self.Select_activity_in_a_sector = pd.read_pickle('eeio_data/Select_activity_in_a_sector.pkl')
self.Leontief_inverse_of_A = pd.read_pickle('eeio_data/Leontief_inverse_of_A.pkl') #total requirements table (or matrix)
self.TRACI_impacts = pd.read_pickle('eeio_data/TRACI_impacts.pkl')
self.Economic_impact = pd.read_pickle('eeio_data/Economic_impact.pkl')
self.Economic_impact_ranked = pd.read_pickle('eeio_data/Economic_impact_ranked.pkl')
        self.Emission_employment_impacts = pd.read_pickle('eeio_data/Emission_employment_impacts.pkl')
self.Environment_intensity_matrix = pd.read_pickle('eeio_data/Environment_intensity_matrix.pkl')
self.Transpose_of_Env_matrix = pd.read_pickle('eeio_data/Transpose_of_Env_matrix.pkl')
self.TRACI_characterization_factor = pd.read_pickle('eeio_data/TRACI_characterization_factor.pkl')
self.Transpose_of_Env_matrix = pd.read_pickle('eeio_data/Transpose_of_Env_matrix.pkl')
self.Transpose_of_TRACI_factor = pd.read_pickle('eeio_data/Transpose_of_TRACI_factor.pkl')
self.commodities = pd.read_csv('sectors.csv').Sector.to_list()
self.input_df = pd.read_csv(input_csv)
def get_sectors(self):
df = self.input_df[['Sector']]
return df
def get_costs(self):
df = self.input_df[['Relative Costs']]
return df
def Matrix_A(self):
df = pd.read_pickle('eeio_data/Matrix_A.pkl') #direct requirements table(or matrix).
df.rename(index = {'2122a0/iron, gold, silver, and other metal ores/us ': \
'2122a0/iron, gold, silver, and other metal ores/us'}, inplace = True)
return df
def I_matrix(self):
df = pd.read_pickle('eeio_data/I_matrix.pkl')
df = df.astype(float)
return df
def IplusA(self):
df = self.I_matrix() + self.Matrix_A()
return df
def IplusA_total(self):
df = self.IplusA().sum(axis=(1))
return df
def IminusA(self):
df = np.subtract(self.I_matrix(), self.Matrix_A())
return df
def IminusA_total(self):
df = self.IminusA().sum(axis=(1))
return df
def Inv_IminusA(self):
df = pd.DataFrame(np.linalg.inv(self.IminusA()))
df.index = self.IminusA().index
df.columns = self.IminusA().columns
return df
def activity(self):
act = self.Select_activity_in_a_sector.set_index('Industry code/Industry name', drop=True)
return act #[act['Amount of economic activity(2013 dollar)']==0]
def search(self, term):
s = []
for word in term:
activity = self.activity()
w=activity[activity.index.str.contains(word)].index
s.append(w)
return pd.DataFrame(s, index=[term]).T#.fillna('-')
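    # e.g. eeio_obj.search(["computer", "electricity"]) lists, side by side, the sectors whose
    # names contain each term (the terms here are purely illustrative).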
def input(self):
sectors = self.get_sectors()
costs = self.get_costs()
activity = self.activity()
for i in range(len(sectors)):
activity.loc[sectors.iloc[i], 'Amount of economic activity(2013 dollar)'] = float(costs.iloc[i])
inputs = activity[activity['Amount of economic activity(2013 dollar)']!=0]
return activity, inputs
def direct_costs(self):
df = self.IplusA()@self.input()[0]
df.columns=['direct_costs']
return df
def total_costs(self):
df = self.Inv_IminusA()@self.input()[0]
df.columns = ['total_costs']
return df
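    # Standard input-output algebra behind the two methods above: with final demand y,
    # direct costs are (I + A) @ y (the demand plus the first round of inputs), while total
    # costs are the Leontief inverse (I - A)^-1 @ y, which accumulates all upstream rounds.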
def econ_impacts(self, rank='total'):
df = pd.concat([self.total_costs(),self.direct_costs()], axis=1)
if rank == 'total':
df = df.sort_values(by=['total_costs'], ascending = False)
elif rank == 'direct':
df = df.sort_values(by=['direct_costs'], ascending = False)
return df
def level_sectors(self, cost='total'):
if cost == 'total':
df = self.total_costs()
elif cost == 'direct':
df = self.direct_costs()
df = df.loc[df.index.isin(self.commodities)]
return df
def env_inventory(self):
        df = pd.DataFrame(self.Environment_intensity_matrix@self.Inv_IminusA())
total_env_vector = df.sum(axis=1)
return df, total_env_vector
def emission_emp_impacts(self):
env_inv_t = self.Environment_intensity_matrix.T.values
df = pd.DataFrame(env_inv_t * self.econ_impacts(rank='total')[['total_costs']].values)
df.columns = self.Environment_intensity_matrix.T.columns
df.index= self.econ_impacts(rank='total')[['total_costs']].index
total_emissions_impact = df.sum()
return df, total_emissions_impact
def TRACI(self):
        df = self.TRACI_characterization_factor@self.env_inventory()[0]
return df
def TRACI_impacts_of_selected_effect(self):
df = self.emission_emp_impacts()[0]@self.TRACI_characterization_factor.T
return df
class buildings(object):
"""Args:
power: the nominal power rating of the data-center in kW.
over-sub: over subcription ratio"""
def __init__(self, power, osr=0):
self.power = power # power in KW
self.osr = osr
def get_building(self, column = "Relative Costs", out_csv = False ):
        Y = pd.read_csv('Y_building.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 00:37:32 2019
@author: tungutokyo
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
pd.set_option("display.max_columns", 60)
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
def get_RandSearchCV(X_train, y_train, X_test, y_test, scoring, type_search, output_file):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
st_t = dt.now()
# Numer of trees are used
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
#n_estimators = list(np.arange(100,1000,50))
#n_estimators = [1000]
# Maximum depth of each tree
max_depth = [5, 10, 25, 50, 75, 100]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 8, 10]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 8, 10]
    # Maximum number of features to consider when making splits
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion="gini", random_state=42)
base_model_gb = GradientBoostingClassifier(criterion="friedman_mse", random_state=42)
    # Run randomized search
n_iter_search = 30
if type_search == "RandomSearchCV-RandomForest":
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
else:
rsearch_cv = RandomizedSearchCV(estimator=base_model_gb,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train, y_train)
#f = open("output.txt", "a")
print("Best estimator obtained from CV data: \n", rsearch_cv.best_estimator_, file=output_file)
print("Best Score: ", rsearch_cv.best_score_, file=output_file)
return rsearch_cv
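# Hedged usage sketch (not part of the original script): running the randomized
# search on its own. The data splits are placeholders prepared elsewhere and the
# log file name is an assumption.
def _example_random_search(X_train, y_train, X_test, y_test):
    with open("rf_search_log.txt", "a") as log_file:
        rsearch = get_RandSearchCV(X_train, y_train, X_test, y_test,
                                   scoring="accuracy",
                                   type_search="RandomSearchCV-RandomForest",
                                   output_file=log_file)
    return rsearch.best_estimator_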
def performance_rand(best_clf, X_train, y_train, X_test, y_test, type_search, num_class, output_file, class_name):
#f = open("output.txt", "a")
print("-"*100)
print("~~~~~~~~~~~~~~~~~~ PERFORMANCE EVALUATION ~~~~~~~~~~~~~~~~~~~~~~~~", file=output_file)
print("Detailed report for the {} algorithm".format(type_search), file=output_file)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
test_accuracy = accuracy_score(y_test, y_pred, normalize=True) * 100
points = accuracy_score(y_test, y_pred, normalize=False)
print("The number of accurate predictions out of {} data points on unseen data is {}".format(
X_test.shape[0], points), file=output_file)
print("Accuracy of the {} model on unseen data is {}".format(
type_search, np.round(test_accuracy, 2)), file=output_file)
print("Precision of the {} model on unseen data is {}".format(
type_search, np.round(metrics.precision_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("Recall of the {} model on unseen data is {}".format(
type_search, np.round(metrics.recall_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("F1 score of the {} model on unseen data is {}".format(
type_search, np.round(metrics.f1_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("\nClassification report for {} model: \n".format(type_search), file=output_file)
print(metrics.classification_report(y_test, y_pred), file=output_file)
plt.figure(figsize=(12,12))
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("\nThe Confusion Matrix: \n", file=output_file)
print(cnf_matrix, file=output_file)
#class_name = ["CDI", "ignore-nonCDI", "Health"]
#class_name = ["CRC", "Adenomas", "Health"]
# class_name = ["OB", "OW", "Health"]
class_name = class_name
cmap = plt.cm.Blues
plt.imshow(cnf_matrix_norm, interpolation="nearest", cmap=cmap)
plt.colorbar()
fmt = ".2g"
thresh = cnf_matrix_norm.max()/2
for i, j in itertools.product(range(cnf_matrix_norm.shape[0]), range(cnf_matrix_norm.shape[1])):
plt.text(j,i,format(cnf_matrix_norm[i,j], fmt), ha="center", va="center",
color="white" if cnf_matrix_norm[i,j] > thresh else "black", fontsize=35)
plt.xticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.yticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.ylabel("True label", fontsize=30)
plt.xlabel("Predicted label", fontsize=30)
plt.ylim((num_class - 0.5, -0.5))
plt.show()
#plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)
"""
cmap = plt.cm.Blues
sns.heatmap(cnf_matrix_norm, annot=True, cmap=cmap, fmt=".2f", annot_kws={"size":15}, linewidths=.05)
if type_search == "RandomSearchCV-RandomForest":
plt.title("The Normalized Confusion Matrix - {}".format("RandomForest"), fontsize=20)
else:
plt.title("The Normalized Confusion Matrix - {}".format("GradientBoosting"), fontsize=20)
plt.ylabel("True label", fontsize=15)
plt.xlabel("Predicted label", fontsize=15)
plt.show()
"""
print("\nROC curve and AUC")
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
y_test_cat = np.array(pd.get_dummies(y_test))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_class):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_cat[:,i], y_pred_prob[:,i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_class)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_class):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_class
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
plt.figure(figsize=(12,12))
plt.plot(fpr["macro"], tpr["macro"],
label = "macro-average ROC curve with AUC = {} - Accuracy = {}%".format(
round(roc_auc["macro"], 2), round(test_accuracy, 2)),
color = "navy", linestyle=":", linewidth=4)
colors = cycle(["red", "orange", "blue", "pink", "green"])
for i, color in zip(range(num_class), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label = "ROC curve of class {0} (AUC = {1:0.2f})".format(i, roc_auc[i]))
plt.plot([0,1], [0,1], "k--", lw=2)
plt.title("ROC-AUC for Random Forest".format(type_search), fontsize=20)
plt.xlabel("False Positive Rate", fontsize=15)
plt.ylabel("True Positive Rate", fontsize=15)
plt.legend(loc="lower right")
plt.show()
importances = best_clf.feature_importances_
indices = np.argsort(importances)[::-1]
return {"importance": importances,
"index": indices,
"y_pred": y_pred,
"y_pred_prob": y_pred_prob}
def RF_classifier(X_train, y_train, X_test, y_test, scoring, type_search, num_class, output_file, top_feature, class_name):
#f = open("output.txt", "a")
print("*"*100)
print("Starting {} steps with {} for evaluation rules...".format(type_search, scoring))
print("*"*100)
rsearch_cv = get_RandSearchCV(X_train, y_train, X_test, y_test, scoring, type_search, output_file)
best_estimator = rsearch_cv.best_estimator_
max_depth = rsearch_cv.best_estimator_.max_depth
n_estimators = rsearch_cv.best_estimator_.n_estimators
var_imp_rf = performance_rand(best_estimator, X_train, y_train, X_test, y_test, type_search,
num_class, output_file, class_name)
print("\n~~~~~~~~~~~~~ Features ranking and ploting ~~~~~~~~~~~~~~~~~~~~~\n", file=output_file)
importances_rf = var_imp_rf["importance"]
indices_rf = var_imp_rf["index"]
y_pred = var_imp_rf["y_pred"]
feature_tab = pd.DataFrame({"Features" : list(X_train.columns),
"Importance": importances_rf})
feature_tab = feature_tab.sort_values("Importance", ascending = False).reset_index(drop=True)
print(feature_tab, file=output_file)
#index = np.arange(len(X_train.columns))
#importance_desc = sorted(importances_rf)
index = feature_tab["Features"].iloc[:top_feature]
importance_desc = feature_tab["Importance"].iloc[:top_feature]
feature_space = []
for i in range(indices_rf.shape[0]-1, -1, -1):
feature_space.append(X_train.columns[indices_rf[i]])
fig, ax = plt.subplots(figsize=(20,25))
ax = plt.gca()
plt.title("Feature importances of Random Forest".format(type_search), fontsize=30)
plt.barh(index, importance_desc, align="center", color="blue", alpha=0.6)
plt.grid(axis="x", color="white", linestyle="-")
plt.yticks(fontsize=30)
plt.xticks(fontsize=20)
plt.xlabel("The Average of Decrease in Impurity", fontsize=20)
plt.ylabel("Features", fontsize=30)
ax.tick_params(axis="both", which="both", length=0)
plt.show()
return {"Main Results": var_imp_rf,
"Importance Features": feature_tab}
###############################################################################
################### Stratified K-Folds cross-validator ########################
###############################################################################
def RF_SKF(rf, X, y, num_cv = 5, random_state = 42):
    skf = StratifiedKFold(n_splits=num_cv, shuffle=True, random_state=random_state)
test_accuracies = 0
test_precisions = 0
test_recalls = 0
test_f1s = 0
cv_count = 0
# rf = RandomForestClassifier(n_estimators = 100)
for train, test in skf.split(X,y):
probas_ = rf.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
y_pred = rf.predict(X.iloc[test])
test_accuracy = metrics.accuracy_score(y.iloc[test], y_pred, normalize = True) * 100
test_accuracies += test_accuracy
test_precision = metrics.precision_score(y.iloc[test], y_pred, average="macro")
test_precisions += test_precision
test_recall_score = metrics.recall_score(y.iloc[test], y_pred, average="macro")
test_recalls += test_recall_score
test_f1_score = metrics.f1_score(y.iloc[test], y_pred, average="macro")
test_f1s += test_f1_score
cv_count += 1
test_accuracies /= cv_count
test_precisions /= cv_count
test_recalls /= cv_count
test_f1s /= cv_count
return {i: j for i, j in
zip(("Accuracy", "Precision_Score", "Recall_Score", "F1_Score"),
(test_accuracies, test_precisions, test_recalls, test_f1s))}
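# Hedged usage sketch (not part of the original script): averaging the four test
# metrics over stratified folds for a fixed forest; X and y are placeholder
# feature/label objects indexable with .iloc.
def _example_rf_skf(X, y):
    rf = RandomForestClassifier(n_estimators=100, random_state=42)
    scores = RF_SKF(rf, X, y, num_cv=5, random_state=42)
    return scores["Accuracy"], scores["F1_Score"]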
def RF_SKF_search(X, y, n_est, crit, max_depth, min_split, min_leaf, max_feature,
num_cv = 5, random_state = 42, report_loop = True):
print(n_est, crit, min_split, min_leaf)
rf = RandomForestClassifier(n_estimators = n_est,
max_depth = max_depth,
criterion = crit,
min_samples_split = min_split,
min_samples_leaf = min_leaf,
max_features = max_feature,
random_state = random_state)
# Cross_validated results
try:
results = RF_SKF(rf, X, y, num_cv = num_cv, random_state = random_state)
except:
results = {"Accuracy": np.nan}
# Get oob_score for non-cross validated results
rf = RandomForestClassifier(n_estimators = n_est,
max_depth = max_depth,
criterion = crit,
min_samples_split = min_split,
min_samples_leaf = min_leaf,
max_features = max_feature,
random_state = random_state,
oob_score = True)
try:
score = rf.fit(X, y).oob_score_
except:
score = np.nan
if report_loop == True:
print("Finished. (Accuracy = {:.2f}%)".format(results["Accuracy"]))
return [n_est, crit, max_depth, min_split, min_leaf, max_feature,
results["Accuracy"], results["Precision_Score"], results["Recall_Score"], results["F1_Score"], score]
def RF_SKF_run(X, y, report_loop = True):
    # Number of trees to use
n_estimators = [50, 100, 150, 200, 250, 300]
criterion = ["gini", "entropy"]
# Maximum depth of each tree
max_depths = [5, 10, 25, 50, 75, 100]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 8, 10]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 8, 10]
    # Maximum number of features to consider when making splits
max_featuress = ["auto", "sqrt", "log2", None]
random_state = 42
rf_result_all = []
for crit in criterion:
for min_split in min_samples_split:
for min_leaf in min_samples_leaf:
for n_est in n_estimators:
for max_depth in max_depths:
for max_features in max_featuress:
                            rf_result = RF_SKF_search(X, y, n_est, crit, max_depth,
                                                      min_split, min_leaf, max_features,
                                                      random_state=random_state,
                                                      report_loop=report_loop)
rf_result_all.append(rf_result)
rf_result_df = pd.DataFrame(rf_result_all,
columns = ["n_estimators", "criterion", "max_depth",
"min_samples_split", "min_samples_leaf", "max_features",
"Accurancy", "Precision_Score", "Recall_Score", "F1_score",
"oob_score"]).sort_values("Accurancy", ascending = False).reset_index(drop=True)
return rf_result_df
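# Hedged usage sketch (not part of the original script): the nested grid in
# RF_SKF_run is exhaustive and therefore slow, so it is usually run on a reduced
# feature set; the returned DataFrame is already sorted by cross-validated accuracy.
def _example_rf_skf_run(X, y):
    grid_results = RF_SKF_run(X, y, report_loop=False)
    return grid_results.head(10)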
def performance_SKF(best_clf, X_train, y_train, X_test, y_test, type_search, num_class, output_file):
#f = open("output.txt", "a")
print("-"*100)
print("~~~~~~~~~~~~~~~~~~ PERFORMANCE EVALUATION ~~~~~~~~~~~~~~~~~~~~~~~~", file=output_file)
print("Detailed report for the {} algorithm".format(type_search), file=output_file)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
test_accuracy = accuracy_score(y_test, y_pred, normalize=True) * 100
points = accuracy_score(y_test, y_pred, normalize=False)
print("The number of accurate predictions out of {} data points on unseen data is {}".format(
X_test.shape[0], points), file=output_file)
print("Accuracy of the {} model on unseen data is {}".format(
type_search, np.round(test_accuracy, 2)), file=output_file)
print("Precision of the {} model on unseen data is {}".format(
type_search, np.round(metrics.precision_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("Recall of the {} model on unseen data is {}".format(
type_search, np.round(metrics.recall_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("F1 score of the {} model on unseen data is {}".format(
type_search, np.round(metrics.f1_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("\nClassification report for {} model: \n".format(type_search), file=output_file)
print(metrics.classification_report(y_test, y_pred), file=output_file)
plt.figure(figsize=(12,12))
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("\nThe Confusion Matrix: \n", file=output_file)
print(cnf_matrix, file=output_file)
"""
cmap = plt.cm.Blues
plt.imshow(cnf_matrix, interpolation="nearest", cmap=cmap)
plt.colorbar()
fmt = "d"
thresh = cnf_matrix.max()/2
for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
plt.text(j,i,format(cnf_matrix[i,j], fmt), ha="center", va="center",
color="white" if cnf_matrix[i,j] > thresh else "black")
"""
cmap = plt.cm.Blues
sns.heatmap(cnf_matrix_norm, annot=True, cmap=cmap, fmt=".2f", annot_kws={"size":15}, linewidths=.05)
plt.title("The Normalized Confusion Matrix - {}".format(type_search), fontsize=20)
plt.ylabel("True label", fontsize=15)
plt.xlabel("Predicted label", fontsize=15)
plt.show()
print("\nROC curve and AUC")
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
y_test_cat = np.array(pd.get_dummies(y_test))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_class):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_cat[:,i], y_pred_prob[:,i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_class)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_class):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_class
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = metrics.auc(fpr["macro"], tpr["macro"])
plt.figure(figsize=(12,12))
plt.plot(fpr["macro"], tpr["macro"],
label = "macro-average ROC curve with AUC = {} - Accuracy = {}%".format(
round(roc_auc["macro"], 2), round(test_accuracy, 2)),
color = "navy", linestyle=":", linewidth=4)
colors = cycle(["red", "orange", "blue", "pink", "green"])
for i, color in zip(range(num_class), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label = "ROC curve of class {0} (AUC = {1:0.2f})".format(i, roc_auc[i]))
plt.plot([0,1], [0,1], "k--", lw=2)
plt.title("ROC-AUC for {}".format(type_search), fontsize=20)
plt.xlabel("False Positive Rate", fontsize=15)
plt.ylabel("True Positive Rate", fontsize=15)
plt.legend(loc="lower right")
plt.show()
importances = best_clf.feature_importances_
indices = np.argsort(importances)[::-1]
return {"importance": importances,
"index": indices,
"y_pred": y_pred,
"y_pred_prob": y_pred_prob}
###############################################################################
################ Forward algorithm to variable selection ######################
###############################################################################
def random_forest_forward(X_train, y_train, X_test, y_test, n_selected_features = 1000, scoring='accuracy'):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion='gini', random_state=42)
n_iter_search = 30
scoring = scoring
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
while count < n_selected_features:
max_acc = 0
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
print("The total time for searching subset: {}".format(dt.now()-st_t))
return all_info, all_model, f
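# Hedged usage sketch (not part of the original script): forward selection re-runs
# a randomized search for every remaining candidate feature at each step, so
# n_selected_features is kept small here; the splits are placeholders.
def _example_forward_selection(X_train, y_train, X_test, y_test):
    all_info, all_model, subsets = random_forest_forward(
        X_train, y_train, X_test, y_test,
        n_selected_features=10, scoring="accuracy")
    return all_info.head()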
def random_forest_randomforward(X_train, y_train, X_test, y_test,
n_selected_features = 1000, scoring='accuracy', n_iter=1000):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
import random
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion='gini', random_state=42)
n_iter_search = 30
scoring = scoring
# selected feature set, initialized to be empty
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
while count < n_selected_features:
#F = []
max_acc = 0
for i in range(n_iter):
col_train = random.sample(list(X_train.columns), count+1)
col_train = np.array(col_train)
X_train_tmp = X_train[col_train]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[col_train])
acc = metrics.accuracy_score(y_test, y_pred)
if acc > max_acc:
max_acc = acc
idx = col_train
best_model = best_estimator
#F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
all_F.append(idx)
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_features', 'Accuracy', 'Features']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
print("The total time for searching subset: {}".format(dt.now()-st_t))
return all_info, all_model, f
def xgboost_forward(X_train, y_train, X_test, y_test, n_selected_features = 1000, scoring='accuracy'):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
import random
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_child_weight = [5, 10, 25, 50, 75, 100]
gamma = [0.5, 1, 1.5, 2, 5]
subsample = [0.2, 0.4, 0.6, 0.8, 1]
colsample_bytree = [0.2, 0.4, 0.6, 0.8, 1]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_child_weight': min_child_weight,
'gamma': gamma,
'subsample': subsample,
'colsample_bytree': colsample_bytree}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
xgb = XGBClassifier(learning_rate=0.02, objective='multi:softmax', silent=True, nthread=20)
n_iter_search = 30
scoring = scoring
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
while count < n_selected_features:
max_acc = 0
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=xgb,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
    a = pd.DataFrame(all_acc)
# speed and power...solves many many things. -<NAME>
# name: make_tsne.py
# author: <NAME>
# last update: 20/05/2021
# description: makes tsne graphs per news source.
import sklearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gensim
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm, preprocessing
from sklearn.manifold import TSNE
from sklearn.model_selection import cross_val_score, train_test_split
def get_stopwords():
file = open('data/stopwords', 'r+', encoding='utf-8')
stop_words = file.read().split('\n')
return stop_words
def get_data(dataframe):
dataframe['is_sponsored'] = np.where(dataframe['sponsor'] != 'none', 1, 0)
return dataframe['is_sponsored']
def get_classes(dataframe):
dataframe['class'] = np.where(dataframe['sponsor'] != 'none', 'advertorial', 'article')
return dataframe['class']
if __name__ == '__main__':
sources = ['nu', 'telegraaf', 'nrc', 'ondernemer']
for source in sources:
        data_ads = pd.read_csv('data/ads/ads_' + source + '.csv')
import pandas as pd
import numpy as np
from pylivetrader.data.data_portal import DataPortal
from pylivetrader.assets import AssetFinder
from pylivetrader.assets import Equity
from pylivetrader.misc.pd_utils import normalize_date
from pylivetrader.finance.order import Order as ZPOrder
from trading_calendars import get_calendar
def get_fixture_data_portal(**kwargs):
b = Backend(**kwargs)
finder = AssetFinder(b)
return DataPortal(b, finder, b._calendar, False)
def create_bars(minutes, offset):
length = len(minutes)
return pd.DataFrame({
'open': np.arange(length) + 10 + offset,
'high': np.arange(length) + 15 + offset,
'low': np.arange(length) + 8 + offset,
'close': np.arange(length) + 10 + offset,
'volume': 100 + offset,
}, index=minutes)
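# Hedged usage sketch (not part of the original fixtures): create_bars expects a
# minute (or session) index from the exchange calendar plus an integer offset that
# shifts the synthetic prices per asset; the date below is an arbitrary session.
def _example_create_bars():
    calendar = get_calendar('NYSE')
    day = normalize_date(pd.Timestamp('2018-08-13'))
    minutes = calendar.minutes_for_sessions_in_range(day, day)
    return create_bars(minutes, offset=1).head()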
class Backend:
def __init__(self, start=None, end=None, assets=None, exchange='NYSE'):
self.start = normalize_date(pd.Timestamp(start or '2018-08-13'))
self.end = normalize_date(pd.Timestamp(end or '2018-08-14'))
self._exchange = exchange
self._calendar = get_calendar(exchange)
self.assets = assets or ['asset-0', 'asset-1', 'asset-2']
minutes = self._calendar.minutes_for_sessions_in_range(
self.start, self.end)
self._minutely_bars = {}
for i, asset in enumerate(self.get_equities()):
bars = create_bars(minutes, i)
self._minutely_bars[asset] = bars
days = self._calendar.sessions_in_range(self.start, self.end)
self._daily_bars = {}
for i, asset in enumerate(self.get_equities()):
bars = create_bars(days, i)
self._daily_bars[asset] = bars
def get_equities(self):
return [
Equity(
asset,
symbol=asset.upper().replace('-', ''),
exchange='NYSE',
start_date=self.start,
end_date=self.end + pd.Timedelta('1000 days'),
) for asset in self.assets
]
def get_adjusted_value(self, assets, field, dt, data_frequency):
return self.get_spot_value(assets, field, dt, data_frequency, False)
def get_spot_value(
self,
assets,
field,
dt,
data_frequency,
quantopian_compatible=True):
assets_is_scalar = not isinstance(assets, (list, set, tuple))
field = 'close' if field == 'price' else field
if assets_is_scalar:
if 'd' in data_frequency:
return self._daily_bars[assets][field].iloc[-1]
else:
return self._minutely_bars[assets][field].iloc[-1]
if 'd' in data_frequency:
return pd.Series([
self._daily_bars[asset][field].iloc[-1]
for asset in assets
], index=assets)
else:
return pd.Series([
self._minutely_bars[asset][field].iloc[-1]
for asset in assets
], index=assets)
def get_bars(self, assets, data_frequency, bar_count=500, end_dt=None):
assets_is_scalar = not isinstance(assets, (list, set, tuple))
if assets_is_scalar:
assets = [assets]
barslist = []
for asset in assets:
if 'm' in data_frequency:
bars = self._minutely_bars[asset].copy()
else:
bars = self._daily_bars[asset].copy()
bars.columns = pd.MultiIndex.from_product([[asset], bars.columns])
barslist.append(bars[-bar_count:])
        return pd.concat(barslist, axis=1)
#GUIclass.py
#Description: GUI Module
#Engineer: <NAME>
#Date: 20200115
"""
This is the launch point for the HEAT code, when accessing from the HTML GUI.
It calls HEAT functions and generates heat fluxes based upon user input. These
functions are usually called from the dashGUI.py script, which is the FLASK
binding to html.
"""
import CADClass
import MHDClass
import toolsClass
import heatfluxClass
import openFOAMclass
import pfcClass
import time
import numpy as np
import logging
import os
import sys
import pandas as pd
import shutil
import errno
import copy
import EFIT.equilParams_class as EP
import GUIscripts.plotlyGUIplots as pgp
import trimesh
log = logging.getLogger(__name__)
tools = toolsClass.tools()
def create_app(GUIobj):
"""
Creates flask app with GUI object so that we can refer to it later.
"""
from flask import Flask
app = Flask(__name__)
app.config['GUI'] = GUIobj
return app
def create_DASH_app(GUIobj):
import dash
app = dash.Dash(__name__, meta_tags=[{"name": "viewport", "content": "width=device-width"}])
app.config['GUI'] = GUIobj
return app
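# Hedged usage sketch (not part of the original module; every path below is a
# placeholder): a GUIobj instance is built first and then handed to one of the
# app factories above. Constructing GUIobj creates directories and sub-objects,
# so this is only meant to illustrate the wiring.
def _example_launch_gui():
    gui = GUIobj(logFile='/tmp/HEATlog.txt',
                 rootDir='/path/to/HEAT/source',
                 dataPath='/path/to/HEAT/data',
                 OFbashrc='/opt/openfoam/etc/bashrc')
    return create_DASH_app(gui)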
class GUIobj():
def __init__(self, logFile, rootDir, dataPath, OFbashrc):
#where HEAT log is written
self.logFile = logFile
#where python source code is located (dashGUI.py)
self.rootDir = rootDir
#where we are saving data / HEAT output
self.dataPath = dataPath
#initialize all the HEAT python submodules (subclasses)
self.initializeEveryone()
#Set timestepMap to nothing
self.timestepMap = None
#Make a tmp dir that we will use for loading/unloading files from GUI
self.makeTmpDir(dataPath)
#initialize bashrc for OF
self.OF.OFbashrc = OFbashrc
return
def makeTmpDir(self,dataPath):
"""
makes a temp directory in rootDir path for user uploaded gfiles
the self.tmpDir directory is accessible to the GUI users for uploading
and downloading
"""
tempDir = dataPath + '/tmpDir/'
self.tmpDir = tempDir
self.MHD.tmpDir = tempDir
try:
os.mkdir(tempDir)
print("Directory " , tempDir , " Created ")
except FileExistsError:
try: shutil.rmtree(tempDir)
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
sys.exit()
os.mkdir(tempDir)
print("Directory " , tempDir , " Created ")
return
def machineSelect(self, MachFlag):
"""
Select a machine and set the necessary paths
"""
self.MachFlag = MachFlag
self.setInitialFiles()
return
def setInitialFiles(self):
"""
sets files back to default settings
infile is path to input file with all HEAT parameters
PartsFile is path to file with parts we will calculate HF on
IntersectFile is path to file with parts we will check for intersections on
"""
if self.MachFlag == 'nstx':
print('Loading NSTX-U Input Filestream')
log.info('Loading NSTX-U Input Filestream')
self.infile = self.rootDir + '/inputs/NSTXU/NSTXU_input.csv'
self.pfcFile = self.rootDir + '/inputs/NSTXU/NSTXUpfcs.csv'
self.OF.meshDir = self.dataPath + '/NSTX/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/NSTX/STLs/'
self.CAD.STPpath = self.dataPath + '/NSTX/STPs/'
elif self.MachFlag == 'st40':
print('Loading ST40 Input Filestream')
log.info('Loading ST40 Input Filestream')
self.infile = self.rootDir + '/inputs/ST40/ST40_input.csv'
self.pfcFile = self.rootDir + '/inputs/ST40/ST40pfcs.csv'
self.OF.meshDir = self.dataPath + '/ST40/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/ST40/STLs/'
self.CAD.STPpath = self.dataPath + '/ST40/STPs/'
elif self.MachFlag == 'd3d':
print('Loading DIII-D Input Filestream')
log.info('Loading DIII-D Input Filestream')
self.infile = self.rootDir + '/inputs/D3D/D3D_input.csv'
self.pfcFile = self.rootDir + '/inputs/D3D/D3Dpfcs.csv'
self.OF.meshDir = self.dataPath + '/D3D/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/D3D/STLs/'
self.CAD.STPpath = self.dataPath + '/D3D/STPs/'
elif self.MachFlag == 'step':
print('Loading STEP Input Filestream')
log.info('Loading STEP Input Filestream')
self.infile = self.rootDir + '/inputs/STEP/STEP_input.csv'
self.pfcFile = self.rootDir + '/inputs/STEP/STEPpfcs.csv'
self.OF.meshDir = self.dataPath + '/STEP/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/STEP/STLs/'
self.CAD.STPpath = self.dataPath + '/STEP/STPs/'
elif self.MachFlag == 'sparc':
print('Loading SPARC Input Filestream')
log.info('Loading SPARC Input Filestream')
self.infile = self.rootDir + '/inputs/SPARC/SPARC_input.csv'
self.pfcFile = self.rootDir + '/inputs/SPARC/SPARCpfcs.csv'
self.OF.meshDir = self.dataPath + '/SPARC/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/SPARC/STLs/'
self.CAD.STPpath = self.dataPath + '/SPARC/STPs/'
else:
print("INVALID MACHINE SELECTION! Defaulting to NSTX-U!")
log.info("INVALID MACHINE SELECTION! Defaulting to NSTX-U!")
self.infile = self.rootDir + '/inputs/NSTXU/NSTXU_input.csv'
self.pfcFile = self.rootDir + '/inputs/NSTXU/NSTXUpfcs.csv'
self.OF.meshDir = self.dataPath + '/NSTX/3Dmeshes'
self.CAD.STLpath = self.dataPath + '/NSTX/STLs/'
self.CAD.STPpath = self.dataPath + '/NSTX/STPs/'
self.OF.templateCase = self.rootDir + '/openFoamTemplates/heatFoamTemplate'
self.OF.templateDir = self.rootDir + '/openFoamTemplates/templateDicts'
return
def initializeEveryone(self):
"""
Create objects that we can reference later on
"""
self.MHD = MHDClass.MHD(self.rootDir, self.dataPath)
self.CAD = CADClass.CAD(self.rootDir, self.dataPath)
self.HF = heatfluxClass.heatFlux(self.rootDir, self.dataPath)
self.OF = openFOAMclass.OpenFOAM(self.rootDir, self.dataPath)
return
def getMHDInputs(self,shot=None,tmin=None,tmax=None,nTrace=None,
gFileList=None,gFileData=None,plasma3Dmask=None):
"""
Get the mhd inputs from the gui or input file
"""
self.MHD.allowed_class_vars()
tools.vars2None(self.MHD)
self.MHD.MachFlag=self.MachFlag
tools.read_input_file(self.MHD, infile=self.infile)
self.MHD.setTypes()
if shot is not None:
self.MHD.shot = shot
if tmin is not None:
self.MHD.tmin = tmin
if tmax is not None:
self.MHD.tmax = tmax
if nTrace is not None:
self.MHD.nTrace = nTrace
self.MHD.gFileList = gFileList
if gFileList is not None:
self.MHD.writeGfileData(gFileList, gFileData)
if plasma3Dmask is not None:
self.MHD.plasma3Dmask = plasma3Dmask
self.MHD.tree = 'EFIT02'
if self.dataPath[-1]!='/':
self.MHD.shotPath = self.dataPath + '/' + self.MHD.MachFlag +"_{:06d}".format(self.MHD.shot)
else:
self.MHD.shotPath = self.dataPath + self.MHD.MachFlag +"_{:06d}".format(self.MHD.shot)
self.MHD.get_mhd_inputs('nstx',self.MHD.gFileList)
# self.t = self.MHD.timesteps[0]
self.MHD.makeEFITobjects()
self.NCPUs = 4
self.MHD.psiSepLimiter = None
self.MHD.setTypes()
print('psiSep0 = {:f}'.format(self.MHD.ep[0].g['psiSep']))
print('psiAxis0 = {:f}'.format(self.MHD.ep[0].g['psiAxis']))
print('Nlcfs0: {:f}'.format(self.MHD.ep[0].g['Nlcfs']))
print('length Rlcfs0: {:f}'.format(len(self.MHD.ep[0].g['lcfs'][:,0])))
log.info('psiSep0 = {:f}'.format(self.MHD.ep[0].g['psiSep']))
log.info('psiAxis0 = {:f}'.format(self.MHD.ep[0].g['psiAxis']))
log.info('Nlcfs0: {:f}'.format(self.MHD.ep[0].g['Nlcfs']))
if self.MHD.plasma3Dmask==1:
print('Solving for 3D plasmas with MAFOT')
log.info('Solving for 3D plasmas with MAFOT')
else:
print('Solving for 2D plasmas with EFIT (no MAFOT)')
log.info('Solving for 2D plasmas with EFIT (no MAFOT)')
return
def gfileClean(self, psiRZMult,psiSepMult,psiAxisMult,FpolMult,
psiRZAdd,psiSepAdd,psiAxisAdd,FpolAdd, t):
"""
multiplies values in MHD ep object with scalars defined by user in html gui
"""
print("psiRZ Multiplier = {:f}".format(psiRZMult))
log.info("psiRZ Multiplier = {:f}".format(psiRZMult))
print("psiRZ Addition = {:f}".format(psiRZAdd))
log.info("psiRZ Addition = {:f}".format(psiRZAdd))
print("psiSep Multipltier = {:f}".format(psiSepMult))
log.info("psiSep Multipltier = {:f}".format(psiSepMult))
print("psiSep Addition = {:f}".format(psiSepAdd))
log.info("psiSep Addition = {:f}".format(psiSepAdd))
print("psiAxis Multipltier = {:f}".format(psiAxisMult))
log.info("psiAxis Multipltier = {:f}".format(psiAxisMult))
print("psiAxis Addition = {:f}".format(psiAxisAdd))
log.info("psiAxis Addition = {:f}".format(psiAxisAdd))
print("Fpol Multiplier = {:f}".format(FpolMult))
log.info("Fpol Multiplier = {:f}".format(FpolMult))
print("Fpol Addition = {:f}".format(FpolAdd))
log.info("Fpol Addition = {:f}".format(FpolAdd))
idx = np.where(t==self.MHD.timesteps)[0][0]
ep = self.MHD.ep[idx]
ep.g['psiRZ'] *= psiRZMult
ep.g['psiSep'] *= psiSepMult
ep.g['psiAxis'] *= psiAxisMult
ep.g['Fpol'] *= FpolMult
ep.g['psiRZ'] += psiRZAdd
ep.g['psiSep'] += psiSepAdd
ep.g['psiAxis'] += psiAxisAdd
ep.g['Fpol'] += FpolAdd
psi = ep.g['psiRZ']
psiSep = ep.g['psiSep']
psiAxis = ep.g['psiAxis']
ep.g['psiRZn'] = (psi - psiAxis) / (psiSep - psiAxis)
return
def findPsiSepfromEQ(self,t, rNew=None):
"""
finds psiSep by stepping to/from core and calculating
minimum psiN along Z-plane at each R location. Increments in um
"""
tIdx = np.where(t==self.MHD.timesteps)[0][0]
ep = self.MHD.ep[tIdx]
        gfile = self.MHD.shotPath + '/' + '{:06d}/'.format(t) + 'g{:06d}.{:05d}'.format(self.MHD.shot, t)
#redefine LCFS to be tangent to CAD maximum R (because rNew=None)
self.newLCFS(t, rNew=rNew, zNew=None, psiSep=None)
print("CAD rTangent: {:f}".format(self.MHD.rTangent))
if rNew is None:
rSep = self.MHD.rTangent
else:
rSep = float(rNew)
zMin = ep.g['ZmAxis'] - 0.25
zMax = ep.g['ZmAxis'] + 0.25
zWall = np.linspace(zMin, zMax, 100000)
while(True):
print("rSep = {:f}".format(rSep))
rWall = np.ones((len(zWall)))*rSep
psiN = ep.psiFunc.ev(rWall, zWall).min()
print("psiN Minimum = {:f}".format(psiN))
if psiN < 1.0:
rSep -= 1e-6 #step 1um away from core
else:
break
self.MHD.rTangent = rSep
#now that we have found rTangent and the psiSep that makes this CAD
#truly limited, write this psiSep to all gfiles in HEAT tree
self.newLCFSallTimesteps(rNew=self.MHD.rTangent, zNew=None, psiSep=None)
#self.MHD.makeEFITobjects()
for PFC in self.PFCs:
PFC.resetPFCeps(self.MHD)
def findPsiSepfromPFCs(self, t, rNew=None):
"""
finds psiSep for limiters by incrementally increasing R_{psiSep, IMP},
at a specific time. Then rewrite all gfiles in MHD.timesteps with this
new LCFS.
Both MHD and PFC objects must be defined before running this function
"""
tIdx = np.where(t==self.MHD.timesteps)[0][0]
gfile = self.MHD.shotPath + '{:06d}/'.format(t) + 'g{:06d}.{:05d}'.format(self.MHD.shot, t)
#redefine LCFS to be tangent to CAD maximum R (because rNew=None)
self.newLCFS(t, rNew=rNew, zNew=None, psiSep=None)
print("CAD rTangent: {:f}".format(self.MHD.rTangent))
#run 2D equilibrium for all points in PFCs and determine if this rTangent
#actually resulted in all psiN > 1.0. If not, add 1um to rTangent and
#test it again, looping until all points in CAD are in SOL
while(True):
privateN = 0
for i,PFC in enumerate(self.PFCs):
PFC.shadowed_mask = np.zeros((len(PFC.centers)))
PFC.ep = EP.equilParams(gfile)
self.MHD.psi2DfromEQ(PFC)
psiMinimum = min(PFC.psimin)
if psiMinimum < 1.0:
print(PFC.ep.g['psiSep'])
print(psiMinimum)
privateN += 1
#psiIdx = np.argmin(PFC.psimin)
#x[i] = PFC.centers[psiIdx][0]
#y[i] = PFC.centers[psiIdx][1]
#z[i] = PFC.centers[psiIdx][2]
#R,Z,phi = tools.xyz2cyl(x,y,z)
if privateN > 0:
print("Number of PFCs with points in core: {:d}".format(privateN))
print("psiSep: {:f}".format(self.MHD.psiSepLimiter))
print("Incrementing rTangent by 10 um...")
self.MHD.rTangent += 1e-6 #add 1 um
print("New rTangent: {:f}".format(self.MHD.rTangent))
self.newLCFS(t, rNew=self.MHD.rTangent, zNew=None, psiSep=None)
else:
break
#now that we have found rTangent and the psiSep that makes this CAD
#truly limited, write this psiSep to all gfiles in HEAT tree
self.newLCFSallTimesteps(rNew=self.MHD.rTangent, zNew=None, psiSep=None)
#self.MHD.makeEFITobjects()
return
def newLCFSallTimesteps(self, rNew=None, zNew=None, psiSep=None):
"""
Loops through the timesteps in MHD object and overwrites new gfiles
with user defined psiSep
"""
for t in self.MHD.timesteps:
self.newLCFS(t,rNew,zNew,psiSep)
return
def newLCFS(self, t, rNew=None, zNew=None, psiSep=None):
"""
resets the lcfs so that it is defined as psi value at rminNew, zminNew,
or psiSep defines new LCFS. For use with limited discharges
overwrites existing gfile
"""
print("redefining LCFS")
log.info("redefining LCFS")
idx = np.where(t==self.MHD.timesteps)[0][0]
ep = self.MHD.ep[idx]
if zNew in ['None', 'none', 'na', 'NA', 'N/A', 'n/a', '', ' ', None]:
zNew = None
else:
zNew = float(zNew)
#find maximum value of R in all of the PFCs
if rNew in ['None', 'none', 'na', 'NA', 'N/A', 'n/a', '', ' ', None]:
print('Using CAD rNew tangent point')
rNew = 0.0
for PFC in self.PFCs:
if PFC.Rmax > rNew:
rNew = PFC.Rmax
else:
rNew = float(rNew)
g = self.MHD.renormalizeLCFS(self.MHD.ep[idx], rNew, zNew, psiSep)
ep.g['psiSep'] = g['psiSep']
ep.g['lcfs'] = g['lcfs']
ep.g['Nlcfs'] = g['Nlcfs']
#set psi and R for LCFS tangent point
self.MHD.psiSepLimiter = g['psiSep']
self.MHD.rTangent = rNew
#overwrite existing gfile
gfile = self.MHD.shotPath + '{:06d}/'.format(t) + 'g{:06d}.{:05d}'.format(self.MHD.shot, t)
self.MHD.writeGfile(gfile, shot=self.MHD.shot, time=t, ep=ep)
self.MHD.ep[idx] = EP.equilParams(gfile)
for PFC in self.PFCs:
PFC.resetPFCeps(self.MHD)
return
def writeGfile(self, newGfile=None, shot=None, t=None):
"""
writes a new gfile from EP object already in MHD object
"""
idx = np.where(t==self.MHD.timesteps)[0][0]
ep = self.MHD.ep[idx]
if os.path.isabs(newGfile) is False:
#save to tmpDir if path is not absolute
newGfile = self.tmpDir + newGfile
else:
print("Please enter a filename that is not absolute (no directories)")
log.info("Please enter a filename that is not absolute (no directories)")
return
print("Writing new gFile: " + newGfile)
log.info("Writing new gFile: " + newGfile)
self.MHD.writeGfile(newGfile, shot, t, ep)
return
def interpolateGfile(self, t):
"""
finds values of all gfile parameters for user defined time using the
gfiles in the self.MHD.ep object
interpolates at given t then writes to file in same gFileInterpolate
directory
returns the name of the new gFile
"""
print("Interpolating gFile")
log.info("Interpolating gFile")
t = int(t)
ep = self.MHD.gFileInterpolate(t)
gFileName = self.tmpDir + 'g{:06d}.{:05d}'.format(self.MHD.shot,t)
self.MHD.writeGfile(gFileName,self.MHD.shot,t,ep)
print("gFile Interpolated")
log.info("gFile Interpolated")
return gFileName
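    # Hedged usage sketch (not part of the original class; shot number and time are
    # placeholders): once getMHDInputs() has loaded a set of gfiles, a single
    # interpolated equilibrium can be produced for an arbitrary time in ms and is
    # written to tmpDir, e.g.
    #     newGfilePath = gui.interpolateGfile(350)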
def interpolateNsteps(self, gfiles, timesteps, N):
"""
interpolates N steps between gfiles arranged at user defined timesteps
Saves resulting gfiles into tmpDir, then creates zip file containing
them all
timesteps and gfiles should be sorted so that they are increasing in
chronological order
gfiles should be named following d3d convention by Lau:
g<XXXXXX>.<YYYYY>
where <XXXXXX> is shot number and <YYYYY> is timestep[ms]
"""
#change filename to d3d convention
self.MHD.tmax = int(max(timesteps))
self.MHD.tmin = int(min(timesteps))
shot = self.MHD.shot
newGfiles = []
for i,f in enumerate(gfiles):
newGfiles.append('g{:06d}.{:05d}'.format(shot,timesteps[i]))
old = self.tmpDir + gfiles[i]
new = self.tmpDir + newGfiles[i]
try:
shutil.copyfile(old, new)
except:
print("Could not copy timestep {:d}. Skipping.".format(timesteps[i]))
log.info("Could not copy timestep {:d}. Skipping.".format(timesteps[i]))
#rebuild eq objects
self.MHD.get_mhd_inputs(self.MachFlag,newGfiles)
self.MHD.makeEFITobjects()
#interpolate between existing gfiles
newNames = []
nTime = len(timesteps)
for i in range(nTime-1):
times = np.linspace(timesteps[i],timesteps[i+1],N)
for t in times:
newName = self.interpolateGfile(t)
newNames.append(newName)
#now zip all these new gFiles into a single file that the user may
#download from GUI
from zipfile import ZipFile
from os.path import basename
zipFile = self.tmpDir + 'InterpolatedGfiles.zip'
zipObj = ZipFile(zipFile, 'w')
for f in newNames:
zipObj.write(f, basename(f))
zipObj.close()
return
def getCADResInputs(self,ROIGridRes=None,gridRes=None):
"""
Loads CAD inputs
"""
import numpy as np
tools.initializeInput(self.CAD, infile=self.infile)
self.CAD.rootDir = self.rootDir #set HEAT rootDir from HEATgui.py
if ROIGridRes is not None:
self.CAD.ROIGridRes = ROIGridRes
if gridRes is not None:
#check if intersection grid resolution string is a number,
#if not use standard mesh algorithms
if tools.is_number(gridRes):
self.CAD.gridRes = gridRes
else:
self.CAD.gridRes = "standard"
return
def getCAD(self,STPfile=None,STPdata=None):
"""
Loads CAD file
"""
import numpy as np
if hasattr(self.CAD, 'gridRes'):
pass
else:
tools.initializeInput(self.CAD, infile=self.infile)
self.CAD.rootDir = self.rootDir #set HEAT rootDir from HEATgui.py
if STPfile is not None:
#make STP path if it doesnt exist
try:
os.makedirs(self.CAD.STPpath)
except:
print("Did not create STPpath. It probably exists already.")
newSTPpath = self.CAD.STPpath + STPfile
#check to see if this STP file exists and write data to the file
if os.path.isfile(newSTPpath) == False:
with open(newSTPpath, 'wb') as f:
f.write(STPdata)
self.CAD.STPfile = newSTPpath
#load STP file using FreeCAD
self.CAD.loadSTEP()
return
def getPFCdataFromGUI(self, data):
"""
initializes timestepMap from GUI rather than from file
"""
        self.timestepMap = pd.DataFrame.from_dict(data)
import pandas as pd
from red_panda.pandas.utils import (
merge_dfs,
row_number,
groupby_mutate
)
import logging
LOGGER = logging.getLogger(__name__)
def test_merge_dfs():
df1 = pd.DataFrame(data={"col0": [0, 1, 2], "col1": [1, 2, 3]})
    df2 = pd.DataFrame(data={"col0": [0, 1, 2], "col2": [1, 2, 3]})
# -*- coding: utf-8 -*-
# Created on 07/01/2022 15:51:23
# @author: ErwingForero
#
from functools import reduce
import os
import re
from sre_compile import isstring
from time import time
from utils import feature_flags
from utils import constants as const
from afo import afo_types
from utils import index as utils
from typing import Any
import pandas as pd
import numpy as np
class DataFrameOptimized():
def __init__(self, table=None, **kargs) -> None:
self.table = None
self.__alerts = None
# methods
if table is not None:
self.__process_table(table, **kargs)
self.create_alerts()
def __len__(self) -> int:
return len(self.table) if self.table is not None else 0
def __process_table(self, table: 'Any', **kargs) -> None:
"""Processes a table from a file or a str - like object .
Args:
table (Any): table to be processed
Raises:
FileNotFoundError: if table is a path not found
TypeError: if type is invalid
"""
if isinstance(table, str):
if not os.path.exists(table):
raise FileNotFoundError(f"file not found {table}")
if "csv" in table or "txt" in table:
self.table = pd.read_csv(table, **kargs)
elif "xls" in table:
self.table = pd.read_excel(table, **kargs)
elif isinstance(table, (tuple, list)) or type(table).__module__ == np.__name__:
self.table = pd.DataFrame(table, **kargs)
elif isinstance(table, (pd.DataFrame)):
self.table = table
else:
raise TypeError(f"Invalid permisible type of {table}")
def delete_rows(self, criteria: 'np.array') -> 'DataFrameOptimized':
"""Delete rows from the dataframe .
Args:
criteria ([numpy.array]): mask of registers, ej: np.alert([True, False, True])
Raises:
ValueError: if actual instance hasn't table
Exception: Generic Exception
Returns:
['DataFrameOptimized']: actual instance of DataFrameOptimized
"""
try:
if self.table is not None:
_df = self.table
else:
raise ValueError("delete_rows - instance need table")
self.table = _df[criteria]
return self
except Exception as e:
raise Exception(f"delete_rows {e}")
def create_alerts(self) -> None:
if self.table is not None:
_columns = [*self.table.columns.to_list(), "description"]
self.__alerts = pd.DataFrame(columns=_columns)
else:
raise Exception("Required table of DataFrameOptimized")
def insert_alert(self, alert: 'Any', description: str) -> None:
"""Inserts an alert into the alert list .
Args:
alert ([Any]): Register with alert
description (str): description of alert
Raises:
Exception: Generic Exception
"""
try:
alert["description"] = description
_alerts_columns = self.table.columns.tolist()
# get only the columns that exist in the alerts
_required_of_alert = alert[[*_alerts_columns, "description"]]
self.__alerts = pd.concat(
[self.__alerts, _required_of_alert], ignore_index=True)
except Exception as e:
raise Exception(f"insert_alert {e}")
def get_alerts(self):
return self.__alerts
def validate_alert(self, mask: bool, description: str, type: str, exception: bool=False, exception_description: str = "", aux_table: 'pd.DataFrame'=None):
"""Validate an alert .
Args:
mask (bool): [description]
description (str): [description]
exception (bool, optional): [description]. Defaults to False.
exception_description (str, optional): [description]. Defaults to "".
"""
if mask.sum() > 0:
if aux_table is None:
self.insert_alert(
alert=self.table[mask],
description=description
)
if exception:
table = self.get_alerts() if aux_table is None else aux_table[mask]
table.to_csv(
os.path.normpath(os.path.join(const.ALERTS_DIR, f"{afo_types.AFO_TYPES[type].value}_alerts.csv")),
index=False,
encoding="latin-1",
sep=";")
if feature_flags == "PROD":
raise Exception(exception_description)
def get_rows(self, criteria: 'np.array') -> 'DataFrameOptimized':
"""Get rows from the dataframe .
Args:
            criteria ([numpy.array]): boolean mask of rows to select, e.g. np.array([True, False, True])
Raises:
ValueError: if actual instance hasn't table
Exception: Generic Exception
Returns:
['DataFrameOptimized']: actual instance of DataFrameOptimized
"""
try:
if self.table is not None:
_df = self.table
else:
raise ValueError("delete_rows - instance need table")
return _df[criteria]
except Exception as e:
raise Exception(f"delete_rows {e}")
def replace_by(
self,
dataframe_right: 'pd.DataFrame',
type_replace: str="all",
mask: list[bool]=None,
on: 'str|list'=None,
left_on: 'str|list'=None,
right_on: 'str|list'=None,
how: str="left",
left_replace: 'list|str'=None,
right_replacer: 'list|str'=None,
create_columns: bool=False,
**kargs) -> 'pd.DataFrame':
"""Replace values in the dataframe with the values in the given dataframe_right.
first merge two dataframes by key (on or (left_on, right_on)), before replace the values
Args:
dataframe_right ([pd.DataFrame]): dataframe that contains key to merge with actual table
type_replace ([str]): type of replace, valid:
all: all values be reaplaced
not_nan: only the values found that have not been NaN in "dataframe_right" will be replaced
mask: reaplace values by mask
invert_mask: replace values by invert mask
mask (bool, optional): mask for reaplace values, expected same length that given dataframe_right, Defaults to None.
on (str|list, optional): key-column in both dataframes, Defaults to None.
left_on (str|list, optional): key-column in left dataframe, Defaults to None.
right_on (str|list, optional): key-column in right dataframe, Defaults to None.
how (str, optional): type of merge dataframes (it's recomended to leave the default value), Defaults to left.
left_replace (str|list, optional): column to be replaced by (right_on or right_replacer), Defaults to None.
right_replacer (str|list, optional): column to replace left column, Defaults to None.
create_columns (bool, optional): if left columns not exist is created, Defaults to False.
Returns:
pd.DataFrame: actual table updated
"""
if on is None and right_on is None:
raise ValueError("Required a value key in dataframe_right")
if mask is None and type_replace not in ["not_nan", "all"]:
raise ValueError("mask is required")
_temp_table = self.table.merge(
right=dataframe_right,
on=on,
left_on=left_on,
right_on=right_on,
how=how,
**kargs
)
key_right = (
on if on is not None else right_on) if right_replacer is None else right_replacer
key_left = (
on if on is not None else left_on) if left_replace is None else left_replace
if isinstance(key_right, (list, tuple)):
if len(key_left) != len(key_right):
raise ValueError(f"Invalid size of keys list, left length {len(key_left)}, right length {len(key_right)}")
for idx, key_r in enumerate(key_right):
self.replace_by(
dataframe_right=dataframe_right,
on=on,
left_on=left_on,
right_on=right_on,
how=how,
type_replace=type_replace,
mask=mask,
left_replace=key_left[idx],
right_replacer=key_r,
create_columns=create_columns,
**kargs
)
else:
if create_columns:
self.table[key_left] = np.nan
if type_replace == "mask":
pass
elif type_replace == "invert_mask":
mask = ~mask
elif type_replace == "not_nan":
mask = ~pd.isna(_temp_table[key_right])
elif type_replace == "all":
mask = np.full(len(self.table), True)
self.table.loc[mask, key_left] = _temp_table.loc[mask, key_right]
return self.table
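    # Hedged usage sketch (not part of the original class): replacing a column by
    # matching on a shared key, only where the right-hand frame has a value.
    #     left = DataFrameOptimized(pd.DataFrame({"id": [1, 2], "val": [10, 20]}))
    #     right = pd.DataFrame({"id": [2], "val_new": [99]})
    #     left.replace_by(right, type_replace="not_nan", on="id",
    #                     left_replace="val", right_replacer="val_new")
    #     # the row with id == 2 now holds 99 in "val"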
def replace_many_by(
self,
dataframe_right: 'pd.DataFrame|list',
on=None,
left_on=None,
right_on=None,
how="left",
merge=True,
mask=None,
mask_idx=0,
columns_right=None,
columns_left=None,
type="change",
type_replace="not_nan",
def_value=np.nan, **kargs):
"""Replace values in the dataframe with the values in the given dataframe_right.
first merge two dataframes by key (on or (left_on, right_on)), before replace column by column
Args:
dataframe_right ([pd.DataFrame]): dataframe that contains key to merge with actual table
mask (bool, optional): mask for reaplace values, expected same length that given dataframe_right, Defaults to None.
on (str|list, optional): key-column in both dataframes, Defaults to None.
left_on (str|list, optional): key-column in left dataframe, Defaults to None.
right_on (str|list, optional): key-column in right dataframe, Defaults to None.
how (str, optional): type of merge dataframes (it's recomended to leave the default value), Defaults to left.
merge (bool, optional): merge dataframes or not, Defaults to True.
mask (list, optional): mask of columns, Defaults to None.
mask_idx (inst, optional): if mask not exist found in dataframe_right index 0 or 1, for create mask, Defaults to 0.
columns_right (str|list, optional): columns of dataframe_right to replace values, for create mask, Defaults to None.
columns_left (str|list, optional): columns of dataframe_right to replace values, for create mask, Defaults to None.
type (str, optional): type of replace columns, Defaults to change, valid:
change: update exist values
add_news: add new columns
type_replace ([str]): type of replace values, valid:
all: all values be reaplaced
not_nan: only the values found that have not been NaN in "dataframe_right" will be replaced
mask: reaplace values by mask
invert_mask: replace values by invert mask
def_value (Any, optional): optional value for columns added news, Defaults to NaN.
Returns:
pd.DataFrame: actual table updated
"""
if on is None and right_on is None and merge:
raise ValueError("Required a value key in dataframe_right")
if merge == False:
_temp_table = dataframe_right
elif isinstance(dataframe_right, (list, tuple)):
if len(dataframe_right) > 2:
raise ValueError("Invalid size for dataframe_right")
_temp_table = [
self.table.merge(
right=data,
on=on,
left_on=left_on,
right_on=right_on,
how=how,
**kargs) for data in dataframe_right
]
else:
_temp_table = self.table.merge(
right=dataframe_right,
on=on,
left_on=left_on,
right_on=right_on,
how=how,
**kargs
)
for idx, _column in enumerate(columns_left):
if type == "add_news" and _column not in self.table.columns.tolist():
self.table[_column] = np.full((len(self.table), ), def_value)
if type_replace == "mask":
pass
elif type_replace == "not_nan":
mask = ~pd.isna(_temp_table[mask_idx][columns_right[mask_idx][idx]]) \
if isinstance(_temp_table, (list, tuple)) \
else ~pd.isna(_temp_table[columns_right[idx]])
elif type_replace == "all":
mask = np.full(len(self.table), True)
if isinstance(_temp_table, (list, tuple)):
self.table.loc[mask, _column] = _temp_table[0].loc[mask,
columns_right[0][idx]]
self.table.loc[~mask, _column] = _temp_table[1].loc[~mask,
columns_right[1][idx]]
else:
self.table.loc[mask, _column] = _temp_table.loc[mask,
columns_right[idx]]
return self.table
def save_csv(self, folder_path: str, name: str = None, sep=";", **kargs) -> str:
"""Save the table to a CSV file .
Args:
folder_path (str): folder
name (str, optional): name of csv file. Defaults to None.
sep (str, optional): separator. Defaults to ";".
"""
if name is None:
name = f"{time.time()}.csv"
route = os.path.normpath(os.path.join(folder_path, name))
self.table.to_csv(path_or_buf=route, sep=sep, **kargs)
return route
@staticmethod
def mask_by(data: 'pd.DataFrame', filter: 'object', replace: bool = False, aux_func: 'func' = None) ->'tuple[pd.DataFrame, pd.Series]':
"""Mask column with a given filter.
Args:
data (pd.DataFrame): [description]
filter (Object): {
"column": name of column of data to be filter,
"<<TYPE>>":"<<VALUE>>"
                <<TYPE>>, permissible values: "more", "less", "equal", "diff", "contains",
                plus the combined forms
                "<<TYPE>>_and_<<TYPE>>" or "<<TYPE>>_or_<<TYPE>>", example:
                "more_and_equals", "more_or_less"
                <<VALUE>>, permissible value: Any
Examples:
{"column": "first_col", "equal": 0}
{"column": "first_col", "equal_or_less": 0}
{"column": "first_col", "contains": "(?i)_final"}
}
replace (bool, optional): replace dataframe by dataframe filtered. Defaults to False.
aux_func (Any, optional): Function to filter column. Defaults to None.
Raises:
ValueError: column is required
ValueError: column not found in data
Returns:
            pd.DataFrame, pd.Series: the dataframe and the boolean mask of the filter
"""
_keys = list(filter.keys())
if "column" not in _keys:
raise ValueError("column is required")
if isinstance(filter["column"], (tuple, list)) and len(filter["column"]) > 1:
if reduce(lambda a,b: (a not in data.columns.tolist()) & (b not in data.columns.tolist()), filter["column"]): #reduce list
raise ValueError("column not found in data")
column = filter["column"]
else:
column = filter["column"][0] if isinstance(filter["column"], (tuple, list)) else filter["column"]
if column not in data.columns.tolist():
raise ValueError("column not found in data")
if aux_func is not None:
mask = aux_func(data[column])
        elif (intersec := re.search(r'_and_|_or_', _keys[1])) is not None:
            filter_str, value = _keys[1], filter[_keys[1]]
            separator = intersec.group()  # either "_and_" or "_or_"
            _filters = filter_str.split(separator)
            mask = None
            for _filter in _filters:
                #evaluate each simple comparison and combine the partial masks
                _, partial_mask = DataFrameOptimized.mask_by(data, {"column": column, f"{_filter}": value})
                if mask is None:
                    mask = partial_mask
                elif separator == "_and_":
                    mask = mask & partial_mask
                else:
                    mask = mask | partial_mask
else:
filter_str, value = _keys[1], filter[_keys[1]]
if "equal" in filter_str:
mask = data[column] == value
elif "diff" in filter_str:
mask = data[column] != value
elif "less" in filter_str:
mask = data[column] < value
elif "more" in filter_str:
mask = data[column] > value
elif "contains" in filter_str:
mask = data[column].str.contains(value)
return (data[mask], mask) if replace is True else (data, mask)
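    # Hedged usage sketch (not part of the original class): compound filters chain
    # two simple comparisons on the same column with "_and_" / "_or_".
    #     df = pd.DataFrame({"qty": [5, 10, 15]})
    #     _, mask = DataFrameOptimized.mask_by(df, {"column": "qty", "more_or_equal": 10})
    #     df[mask]   # keeps the rows where qty == 10 or qty == 15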
@staticmethod
def combine_columns(data: 'tuple[pd.DataFrame]', suffixes: 'tuple(str)', on: 'str' = None, left_on: 'str' = None, right_on: 'str' = None, how: 'str'= None, **kargs) -> pd.DataFrame:
if len(data) != 2:
raise ValueError("Invalid size for data")
_temp_table = data[0].merge(
right=data[1],
on=on,
left_on=left_on,
right_on=right_on,
how=how,
suffixes= suffixes,
indicator=True,
**kargs
)
columns_with_suffixes = [column for column in _temp_table.columns.tolist()
if any(suffix in column for suffix in suffixes)]
# pair the suffixed columns (left, right) and coalesce each pair
for idx, left_column in enumerate(columns_with_suffixes[::2]):
right_column = columns_with_suffixes[2 * idx + 1]
mask = _temp_table[left_column].isna()
column_without_suffixes = left_column
# delete suffixes
for suffix in suffixes:
column_without_suffixes = column_without_suffixes.replace(suffix, "")
_temp_table.loc[~mask, column_without_suffixes] = _temp_table[left_column]
_temp_table.loc[mask, column_without_suffixes] = _temp_table[right_column]
_temp_table.drop([*columns_with_suffixes, "_merge"], axis=1, inplace=True)
return _temp_table
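# Usage sketch (hypothetical frames `left` and `right` sharing an "id" column):
#   merged = DataFrameOptimized.combine_columns((left, right), suffixes=("_x", "_y"), on="id", how="outer")
# Overlapping columns are coalesced into a single unsuffixed column, keeping the
# left-hand value wherever it is not NaN and falling back to the right-hand one.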
@staticmethod
def get_table_excel(path: str, sheet: str, header_idx: 'list' = None, skiprows: 'list' = None, converters: 'list' = None, *args, **kargs) -> 'DataFrameOptimized':
"""Returns a DataFrame instance that will be used to parse the table at the given path .
Args:
path [str]: path of file
sheet [str]: sheet of data
header_idx [list]: list of each starting and ending column, max_len = 2, example: [0,5]
skiprows [list]: list of each starting and ending row, max_len = 2, example: [0,1000]
converters [list]: list of columns converters, same size that columns.
Returns:
[DataFrameOptimized]: instance of DataFrameOptimized
"""
try:
_data = utils.get_data_of_excel_sheet(
file_path=path, sheet=sheet, header_idx=header_idx, skiprows=skiprows, converters=converters)
_dt = DataFrameOptimized(_data, *args, **kargs)
return _dt
except Exception as e:
raise Exception(f"get_table_excel - {e}")
@staticmethod
def get_table_csv(path: str, *args, **kargs) -> 'DataFrameOptimized':
"""Returns a DataFrame instance that will be used to parse the table at the given path .
Raises:
Exception: if the DataFrameOptimized constructor fails
Returns:
[DataFrameOptimized]: instance of DataFrameOptimized
Examples
--------
DataFrameOptimized.get_table_csv(((1,2), (3,4)), columns=["col1", "col2"])
DataFrame
col1 col2
0 1 2
1 3 4
"""
try:
_dt = DataFrameOptimized(path, *args, **kargs)
return _dt
except Exception as e:
raise Exception(f"get_table_csv - {e}")
@staticmethod
def from_tuple(values: tuple, columns: tuple) -> 'Any':
"""Convert a tuple of values and columns to a DataFrameOptimized.
Raises:
Exception: if the number of values per row does not match the number of columns
Returns:
[DataFrameOptimized]: DataFrameOptimized wrapping the given values
Examples
--------
DataFrameOptimized.from_tuple(((1,2), (3,4)), columns=["col1", "col2"])
DataFrame
col1 col2
0 1 2
1 3 4
"""
try:
if len(values[0]) != len(columns):  # number of values per row must match the number of columns
raise Exception("number of values per row differs from the number of columns")
_dt = DataFrameOptimized(pd.DataFrame(values, columns=columns))
return _dt
except Exception as e:
raise Exception(f"from_tuple - {e}")
def calculateAnyProfile(profileType, df_labs, df_meds, df_procedures, df_diagnoses, df_phenotypes):
"""Calculate a single profile based on the type provided and data cleaned from getSubdemographicsTables
Arguments:
profileType -- which individual profile type you would like generated, this will be the category with the header information
(Options: 'labs', 'medications', 'procedures', 'diagnoses', 'phenotypes')
Keywords:
df_labs -- labs dataframe returned from getSubdemographicsTables
df_meds -- medications dataframe returned from getSubdemographicsTables
df_procedures -- procedures dataframe returned from getSubdemographicsTables
df_diagnoses -- diagnoses dataframe returned from getSubdemographicsTables
df_phenotypes -- phenotypes dataframe returned from getSubdemographicsTables
Returns Pythonic structures needed to generate profile in JSON format using the corresponding write profile function
"""
import os
import sys
import sqlalchemy
import urllib.parse
import pandas as pd
import numpy as np
import getpass
from dataclasses import dataclass
from SciServer import Authentication
from datetime import datetime
import pymssql
try:
# Make Labs Profile
if profileType == 'labs':
# High Level Info, Scalar Distribution
labs_counts = df_labs.LAB_LOINC.value_counts()
grouped_labs = df_labs.groupby(['LAB_LOINC', 'resultYear'])
labs_frequencyPerYear = (df_labs.groupby(['LAB_LOINC','PATID','resultYear']).PATID.size()
.groupby(['LAB_LOINC','resultYear']).aggregate(np.mean))
labs_fractionOfSubjects = (np.divide(df_labs.groupby(['LAB_LOINC']).PATID.nunique(),
df_labs.PATID.nunique()))
labs_units = df_labs.groupby(['LAB_LOINC']).LOINC_UNIT.unique()
labs_names = df_labs.groupby(['LAB_LOINC']).LOINC_SHORTNAME.unique()
def percentile(n):
def percentile_(x):
return x.quantile(n*0.01)
percentile_.__name__ = '%s' % n
return percentile_
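# e.g. percentile(10) returns an aggregator whose __name__ is "10", so the
# aggregated statistics below come out with columns labelled '10', '20', ... '90'.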
labs_stats = (grouped_labs
.RESULT_NUM.agg(['min','max', 'mean','median','std',
percentile(10), percentile(20), percentile(30),
percentile(40), percentile(50), percentile(60),
percentile(70), percentile(80), percentile(90)]))
def fracsAboveBelowNormal(x):
try:
aboveNorm = np.divide(np.sum(x.RESULT_NUM > x.range_high), x.RESULT_NUM.size)
belowNorm = np.divide(np.sum(x.RESULT_NUM < x.range_low), x.RESULT_NUM.size)
return pd.Series({'aboveNorm':aboveNorm, 'belowNorm':belowNorm})
except:
return pd.Series({'aboveNorm':np.nan, 'belowNorm':np.nan})
labs_aboveBelowNorm = (grouped_labs.apply(fracsAboveBelowNormal))
labs_correlatedLabsCoefficients = (df_labs.groupby(['LAB_LOINC','resultYear','PATID'])
.RESULT_NUM.mean())
labs_abscorrelation = 0
## LABS TO MEDICATIONS
def patientsAboveBelowNormalLabsMeds(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to meds table
abnormalPatientsMeds = df_meds[df_meds.PATID.isin(patientsAboveBelowNorm) &
(df_meds.startYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'medsAboveBelowNorm': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().index,
'counts': abnormalPatientsMeds.JH_INGREDIENT_RXNORM_CODE.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedMedsCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsMeds))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedMedsCoefficients.index:
thisLabYear = labs_correlatedMedsCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for medInd in range(len(labs_correlatedMedsCoefficients.loc[lab].medsAboveBelowNorm.values)):
mytups.append((thisLabYear.medsAboveBelowNorm.values[medInd], thisLabYear.counts[medInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedMedsCoefficients = (pd.DataFrame.from_records(mytups, columns=['JH_INGREDIENT_RXNORM_CODE','Relative_Counts'],
index=index))
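# The resulting frame is indexed by (LAB_LOINC, resultYear) and holds one row per
# RXNORM ingredient with that ingredient's share of all medications seen among
# patients whose lab result fell outside the normal range in that year.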
## LABS TO PROCEDURES
def patientsAboveBelowNormalLabsProcs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsProcs = df_procedures[df_procedures.PATID.isin(patientsAboveBelowNorm) &
(df_procedures.encounterYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'procsAboveBelowNorm': abnormalPatientsProcs.RAW_PX.value_counts().index,
'counts': abnormalPatientsProcs.RAW_PX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedProceduresCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsProcs))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedProceduresCoefficients.index:
thisLabYear = labs_correlatedProceduresCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for procInd in range(len(labs_correlatedProceduresCoefficients.loc[lab].procsAboveBelowNorm.values)):
mytups.append((thisLabYear.procsAboveBelowNorm.values[procInd], thisLabYear.counts[procInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedProceduresCoefficients = (pd.DataFrame.from_records(mytups, columns=['RAW_PX','Relative_Counts'],
index=index))
## LABS TO DIAGNOSES
def patientsAboveBelowNormalLabsDiags(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsDiags = df_diagnoses[df_diagnoses.PATID.isin(patientsAboveBelowNorm) &
(df_diagnoses.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
return pd.Series({'diagsAboveBelowNorm': abnormalPatientsDiags.DX.value_counts().index,
'counts': abnormalPatientsDiags.DX.value_counts().values})
# Need to grab the indices of those with abnormal lab, grab their medications, count and rank them
labs_correlatedDiagnosisCoefficients = (grouped_labs.apply(patientsAboveBelowNormalLabsDiags))
# Currently a little hacky, but seems fast
mytups = list()
multiIndex = list()
for lab in labs_correlatedDiagnosisCoefficients.index:
thisLabYear = labs_correlatedDiagnosisCoefficients.loc[lab]
thisLab = lab[0]
thisYear = lab[1]
totalCrossTab = np.sum(thisLabYear.counts)
for diagInd in range(len(labs_correlatedDiagnosisCoefficients.loc[lab].diagsAboveBelowNorm.values)):
mytups.append((thisLabYear.diagsAboveBelowNorm.values[diagInd], thisLabYear.counts[diagInd]/totalCrossTab))
multiIndex.append((thisLab, thisYear))
index = pd.MultiIndex.from_tuples(multiIndex)
labs_correlatedDiagnosisCoefficients = (pd.DataFrame.from_records(mytups, columns=['DX','Relative_Counts'],
index=index))
## LABS TO PHENOTYPES
def patientsAboveBelowNormalLabsHPOs(x):
# Get patients above and below normal
patientsAboveNorm = x.PATID[x.RESULT_NUM > x.range_high].tolist()
patientsBelowNorm = x.PATID[x.RESULT_NUM < x.range_low].tolist()
# Get unique patient IDs for above & below normal
patientsAboveBelowNorm = list(set(patientsAboveNorm + patientsBelowNorm))
# Link to procs table
abnormalPatientsHPOs = df_phenotypes[df_phenotypes.PATID.isin(patientsAboveBelowNorm) &
(df_phenotypes.admitYear == pd.to_datetime(x.RESULT_DATE).dt.year.unique()[0])]
import os,sys
import argparse
import pandas as pd
import timeit
import copy
import json
from utils import filter_columns_from_kb, query_equal_from_kb, query_unequal_from_kb
from revertible_string import RevertibleString
# babi6_filter_keys = ['@R_cuisine', '@R_location', '@R_price']
###
# BABI Function
###
class BABI7Lexicalizer():
# Constant definition
api_call_pattern = 'api_call'
delex_prefix = '@'
delex_keys = ['@address', '@area', '@food', '@location', '@phone', '@pricerange', '@postcode', '@type', '@id', '@name']
filter_keys = ['@food', '@area', '@pricerange']
# Function to read knwoledge base as pandas dataframe sorted by rating from the given path
@staticmethod
def read_knowledge_base(path):
kb_dict = json.load(open(path, 'r'))
for i in range(len(kb_dict)):
kb_item = kb_dict[i]
for k in kb_item.keys():
if(k == "postcode"):
kb_item[k] = kb_item[k].replace(".","").replace(",","").replace(" ","").lower()
else:
kb_item[k] = kb_item[k].replace(" ","_").lower()
kb = pd.DataFrame.from_records(kb_dict).fillna('<UNK>')
kb.columns = [f'@{column}' for column in kb.columns]
kb['@food'] = kb['@food'].fillna('international')
kb['@phone'] = kb['@phone'].fillna('01223_000000')
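# Note: the fillna('<UNK>') above already replaced every NaN, so these per-column
# defaults only take effect if that global fill is removed or applied afterwards.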
return kb
# Function to read knwoledge base modifier and update the existing kb
@staticmethod
def modify_knowledge_base(kb, path):
raise NotImplementedError
# return kb
# Function to read dialogue from the given path
@staticmethod
def read_dialogue(template_path):
dialogues = []
dialogue = []
for line in open(template_path,'r').readlines():
if len(line) == 1: # Only \n
dialogues.append(dialogue)
dialogue = []
else:
first_space_index = line.index(' ')
turn_id = line[:first_space_index]
conv = line[first_space_index+1:].replace('\n','')
request, response = conv.split('\t')
dialogue.append((turn_id, RevertibleString(request), RevertibleString(response)))
return dialogues
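# Template lines are assumed to look like "<turn_id> <request>\t<response>", with a
# blank line separating consecutive dialogues (a sketch of the expected bAbI format).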
# Function to generate metadata from all BABI dialogues
@staticmethod
def generate_metadata(dialogues):
delexicalized_dialog_meta = [] # Buffer containing list of tuple(dialog, delex_dict, delex_resolved_args_list, max_delex_index)
for dialogue in dialogues:
delex_to_chat_dict = { } # Dictionary of recoderd delexicalized word to list of Chat object containing the corresponding word
delex_resolved_args_list = [] # List of all recorded delexicalized words in api_call that need to be resolved for generation
max_delex_index = 0
query_max_delex_index = 0
for turn_id, request, response in dialogue:
# Process request & response
for chat in [request, response]:
if BABI7Lexicalizer.delex_prefix in chat.str:
for delex_key in BABI7Lexicalizer.delex_keys:
# TODO: hardcoded; the max number of entities in babi_7 is only 7, should replace this with a count if possible
for i in range(1, 9):
recorded_delex_word = f'{delex_key}_{i}'
if recorded_delex_word in chat.str:
if recorded_delex_word not in delex_to_chat_dict:
delex_to_chat_dict[recorded_delex_word] = []
delex_to_chat_dict[recorded_delex_word].append(chat)
if max_delex_index < i:
max_delex_index = i
# If api_call
if response.str.startswith(BABI7Lexicalizer.api_call_pattern):
delex_words = response.str.split(' ')[1:]
delex_resolved_args = []
for delex_word in delex_words:
if delex_word.startswith(BABI7Lexicalizer.delex_prefix):
delex_resolved_args.append(delex_word)
index = int(delex_word[-1])
query_max_delex_index = max_delex_index
delex_resolved_args_list.append(delex_resolved_args)
# Add result to global metadata buffer
delexicalized_dialog_meta.append((dialogue, delex_to_chat_dict, delex_resolved_args_list, max_delex_index, query_max_delex_index))
return delexicalized_dialog_meta
# Generate knowledge base index function
@staticmethod
def generate_kb_index(kb):
possible_filter_keys_list = [ # TODO: hardcoded combinations of the filter keys
BABI7Lexicalizer.filter_keys, # 3 Keys
BABI7Lexicalizer.filter_keys[:2], BABI7Lexicalizer.filter_keys[1:], BABI7Lexicalizer.filter_keys[::2], # 2 Keys
[BABI7Lexicalizer.filter_keys[0]], [BABI7Lexicalizer.filter_keys[1]], [BABI7Lexicalizer.filter_keys[2]] # 1 Key
]
default_index = pd.DataFrame({'index':['_'],'filter_type':['_'],'num_entity':[kb.shape[0]],'kb':[kb]}).set_index('index')
index_kbs = [default_index]
for possible_filter_keys in possible_filter_keys_list:
possible_queries_df = kb[possible_filter_keys].drop_duplicates()
filter_type = '_'.join(possible_filter_keys)
index_keys = []
filter_types = []
kb_sizes = []
filtered_kbs = []
for row in possible_queries_df.to_dict('records'):
filters = [(attr,value) for attr, value in row.items()]
filtered_kb = query_equal_from_kb(kb, filters)
index_keys.append('_'.join([value for value in row.values()]))
kb_sizes.append(filtered_kb.shape[0])
filter_types.append(filter_type)
filtered_kbs.append(filtered_kb)
index_data = {'index':index_keys,'filter_type':filter_types,'num_entity':kb_sizes,'kb':filtered_kbs}
index_kbs.append(pd.DataFrame(index_data).set_index('index'))
index_kb = pd.concat(index_kbs)
return index_kb
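# Index keys are the filter values joined by "_"; e.g. a slice filtered on
# (@food, @area, @pricerange) might be keyed "british_centre_moderate"
# (illustrative values only, not taken from an actual knowledge base).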
# Generate dialogue index function
@staticmethod
def generate_dialogue_index(dialogues):
delexicalized_dialog_meta = BABI7Lexicalizer.generate_metadata(dialogues)
meta_data_list = []
num_entity_list = []
filter_type_list = []
for dialogue_meta in delexicalized_dialog_meta:
_, _, delex_resolved_args_list, max_delex_index, query_max_delex_index = dialogue_meta
meta_data_list.append(dialogue_meta)
num_entity_list.append(max_delex_index - query_max_delex_index)
if len(delex_resolved_args_list) > 0:
# There is only 1 api_call maximum in babi7, process the first element if any
if len(delex_resolved_args_list[0]) == 0:
# There is api_call with no delexicalized parameter
filter_type_list.append('_')
else:
filter_type_list.append('_'.join([delex_word[:-2] for delex_word in delex_resolved_args_list[0]]))
else:
# If there is no api_call, add to global index
filter_type_list.append('_')
index_dialog = pd.DataFrame({'filter_type': filter_type_list, 'num_entity': num_entity_list, 'meta': meta_data_list})
"""
Generic code to plot any mooring extraction
"""
from lo_tools import Lfun, zfun
from lo_tools import plotting_functions as pfun
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
Ldir = Lfun.Lstart()
# choose the file
in_dir0 = Ldir['LOo'] / 'extract'
gtagex = Lfun.choose_item(in_dir0, tag='', exclude_tag='', itext='** Choose gtagex from list **')
in_dir = in_dir0 / gtagex / 'moor'
moor_name = Lfun.choose_item(in_dir, tag='.nc', exclude_tag='', itext='** Choose mooring extraction from list **')
moor_fn = in_dir / moor_name
# load everything using xarray
ds = xr.load_dataset(moor_fn)
ot = ds.ocean_time.values
ot_dt = pd.to_datetime(ot)
t = (ot_dt - ot_dt[0]).total_seconds().to_numpy()
T = t/86400 # time in days from start
print('time step of mooring'.center(60,'-'))
print(t[1])
print('time limits'.center(60,'-'))
print('start ' + str(ot_dt[0]))
print('end ' + str(ot_dt[-1]))
print('info'.center(60,'-'))
VN_list = []
for vn in ds.data_vars:
print('%s %s' % (vn, ds[vn].shape))
VN_list.append(vn)
# populate lists of variables to plot
vn2_list = ['zeta']
if 'Pair' in VN_list:
vn2_list += ['shflux', 'swrad']
vn3_list = []
if 'salt' in VN_list:
vn3_list += ['salt', 'temp']
if 'NO3' in VN_list:
vn3_list += ['oxygen']
# if 'u' in VN_list:
# vn3_list += ['u', 'v']
# plot time series using a pandas DataFrame
df = pd.DataFrame(index=ot)
"""
Test all of the methods for loading data into PandaSpark
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas
from tempfile import NamedTemporaryFile
from sparklingpandas.test.sparklingpandastestcase import \
SparklingPandasTestCase
import sys
import unittest
class DataLoad(SparklingPandasTestCase):
"""
Class of data loading tests.
"""
def test_from_tuples(self):
"""
Test loading the data from a python tuples.
"""
input = [("tea", "happy"), ("water", "sad"), ("coffee", "happiest")]
pframe = self.psc.DataFrame(input, columns=['magic', 'thing'])
collectedframe = pframe.collect().sort(['magic'])
shouldeq = pandas.DataFrame(input, columns=['magic', 'thing'])
def Moder_merger(params : dict):
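# The helpers below read `prec_mass_error` (and the loaded mgf data) from the
# enclosing scope; presumably these are unpacked from `params` further down in
# this function.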
def Solo_M1mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 72.081324
mz_Cl = 34.968853 + mz - 72.081324
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
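# Each Solo_* helper follows the same pattern: subtract the adduct mass from the
# precursor m/z to recover the neutral monomer (here 72.081324 amounts to C4H11N
# minus a proton), then look for the complementary deprotonated / chloride /
# sodiated forms within prec_mass_error and return how many were found.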
def Solo_M1mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 44.997654
mz_Cl = 34.968853 + mz - 44.997654
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
def Solo_M1m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
mz_Cl = 34.968853 + mz - 66.979600
mz_m2HpNa = 20.97412 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M1m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M1m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 36.948058
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 72.081324)/2
mz_Cl = 34.968853 + (mz - 72.081324)/2
mz_m2HpNa = 20.97412 + (mz - 72.081324)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 44.997654)/2
mz_Cl = 34.968853 + (mz - 44.997654)/2
mz_m2HpNa = 20.97412 + (mz - 44.997654)/2
mz_mHpHCOOH = 44.997654 + (mz - 44.997654)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_mHpHCOOH = peaks.between(mz_mHpHCOOH - prec_mass_error, mz_mHpHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_mHpHCOOH])
def Solo_M2mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/2
mz_Cl = 34.968853 + (mz + 1.007825)/2
mz_m2HpNa = 20.97412 + (mz + 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/2
mz_Cl = 34.968853 + (mz - 34.968853)/2
mz_m2HpNa = 20.97412 + (mz - 34.968853)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/2
mz_Cl = 34.968853 + (mz - 66.979600)/2
mz_m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/2
mz_Cl = 34.968853 + (mz - 20.97412)/2
mz_m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 36.948058)/2
mz_Cl = 34.968853 + (mz - 36.948058)/2
mz_m2HpNa = 20.97412 + (mz - 36.948058)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 36.948058)/2
mz_m2HpK = 36.948058 + (mz - 36.948058)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK])
def Solo_M3mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/3
mz_Cl = 34.968853 + (mz + 1.007825)/3
mz_m2HpNa = 20.97412 + (mz + 1.007825)/3
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/3
mz_m2HpK = 36.948058 + (mz + 1.007825)/3
mz_M2mH = -1.007825 + (mz + 1.007825)*(2/3)
mz_M2pCl = 34.968853 + (mz + 1.007825)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(2/3)
mz_M2m2HpK = 36.948058 + (mz + 1.007825)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/3
mz_Cl = 34.968853 + (mz - 34.968853)/3
mz_m2HpNa = 20.97412 + (mz - 34.968853)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/3
mz_m2HpK = 36.948058 + (mz - 34.968853)/3
mz_M2mH = -1.007825 + (mz - 34.968853)*(2/3)
mz_M2pCl = 34.968853 + (mz - 34.968853)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 34.968853)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/3
mz_Cl = 34.968853 + (mz - 66.979600)/3
mz_m2HpNa = 20.97412 + (mz - 66.979600)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/3
mz_m2HpK = 36.948058 + (mz - 66.979600)/3
mz_M2mH = -1.007825 + (mz - 66.979600)*(2/3)
mz_M2pCl = 34.968853 + (mz - 66.979600)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 66.979600)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/3
mz_Cl = 34.968853 + (mz - 20.97412)/3
mz_m2HpNa = 20.97412 + (mz - 20.97412)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/3
mz_m2HpK = 36.948058 + (mz - 20.97412)/3
mz_M2mH = -1.007825 + (mz - 20.97412)*(2/3)
mz_M2pCl = 34.968853 + (mz - 20.97412)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 20.97412)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M4mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/4
mz_Cl = 34.968853 + (mz + 1.007825)/4
mz_m2HpNa = 20.97412 + (mz + 1.007825)/4
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/4
mz_m2HpK = 36.948058 + (mz + 1.007825)/4
mz_M2mH = -1.007825 + (mz + 1.007825)/2
mz_M2pCl = 34.968853 + (mz + 1.007825)/2
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/2
mz_M2m2HpK = 36.948058 + (mz + 1.007825)/2
mz_M3mH = -1.007825 + (mz + 1.007825)*(3/4)
mz_M3pCl = 34.968853 + (mz + 1.007825)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz + 1.007825)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(3/4)
mz_M3m2HpK = 36.948058 + (mz + 1.007825)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/4
mz_Cl = 34.968853 + (mz - 34.968853)/4
mz_m2HpNa = 20.97412 + (mz - 34.968853)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/4
mz_m2HpK = 36.948058 + (mz - 34.968853)/4
mz_M2mH = -1.007825 + (mz - 34.968853)/2
mz_M2pCl = 34.968853 + (mz - 34.968853)/2
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/2
mz_M2m2HpK = 36.948058 + (mz - 34.968853)/2
mz_M3mH = -1.007825 + (mz - 34.968853)*(3/4)
mz_M3pCl = 34.968853 + (mz - 34.968853)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 34.968853)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 34.968853)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/4
mz_Cl = 34.968853 + (mz - 66.979600)/4
mz_m2HpNa = 20.97412 + (mz - 66.979600)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/4
mz_m2HpK = 36.948058 + (mz - 66.979600)/4
mz_M2mH = -1.007825 + (mz - 66.979600)/2
mz_M2pCl = 34.968853 + (mz - 66.979600)/2
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
mz_M2m2HpK = 36.948058 + (mz - 66.979600)/2
mz_M3mH = -1.007825 + (mz - 66.979600)*(3/4)
mz_M3pCl = 34.968853 + (mz - 66.979600)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 66.979600)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 66.979600)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/4
mz_Cl = 34.968853 + (mz - 20.97412)/4
mz_m2HpNa = 20.97412 + (mz - 20.97412)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/4
mz_m2HpK = 36.948058 + (mz - 20.97412)/4
mz_M2mH = -1.007825 + (mz - 20.97412)/2
mz_M2pCl = 34.968853 + (mz - 20.97412)/2
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
mz_M2m2HpK = 36.948058 + (mz - 20.97412)/2
mz_M3mH = -1.007825 + (mz - 20.97412)*(3/4)
mz_M3pCl = 34.968853 + (mz - 20.97412)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 20.97412)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 20.97412)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M2pH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
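# Positive-mode analogue: (mz - 1.007825) / 2 is the neutral monomer mass of a
# presumed [2M+H]+ precursor, and the spectrum is checked for the protonated monomer.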
def Solo_M2pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 42.034374)/2
mz_Na = 22.98977 + (mz - 42.034374)/2
mz_K = 38.963708 + (mz - 42.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 42.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 42.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 42.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 42.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 42.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 42.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 33.034040)/2
mz_Na = 22.98977 + (mz - 33.034040)/2
mz_K = 38.963708 + (mz - 33.034040)/2
mz_HpCH3CN = 42.034374 + (mz - 33.034040)/2
mz_HpCH3OH = 33.034040 + (mz - 33.034040)/2
mz_NapCH3CN = 64.016319 + (mz - 33.034040)/2
mz_NapCH3OH = 55.015985 + (mz - 33.034040)/2
mz_KpCH3CN = 79.990257 + (mz - 33.034040)/2
mz_KpCH3OH = 70.989923 + (mz - 33.034040)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 47.013304)/2
mz_Na = 22.98977 + (mz - 47.013304)/2
mz_K = 38.963708 + (mz - 47.013304)/2
mz_HpCH3CN = 42.034374 + (mz - 47.013304)/2
mz_HpCH3OH = 33.034040 + (mz - 47.013304)/2
mz_NapCH3CN = 64.016319 + (mz - 47.013304)/2
mz_NapCH3OH = 55.015985 + (mz - 47.013304)/2
mz_KpCH3CN = 79.990257 + (mz - 47.013304)/2
mz_KpCH3OH = 70.989923 + (mz - 47.013304)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
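# e.g. _construct(4) yields a length-4 Series, a 4x4 DataFrame or a 4x4x4 Panel of
# random normals, depending on which subclass under test supplies self._typ.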
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
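# `_metadata` lists attribute names (such as `name`) that should be carried
# over to the result of an operation via `__finalize__`; the checks below
# verify which operations preserve them and which drop them by default.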
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
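# The sample() checks below cover: reproducibility with a seed or RandomState,
# mutual exclusion of `n` and `frac`, validation of negative/float sizes,
# weight handling (length, sign, inf/NaN/None, re-normalization, alignment by
# index) and the `axis` argument.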
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when the random_state argument is invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `n`
###
# Giving both frac and n throws an error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4)) == 4)
self.assertTrue(len(o.sample(frac=0.34)) == 3)
self.assertTrue(len(o.sample(frac=0.36)) == 4)
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All-zero weights raise an error
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few DataFrame tests with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that weights that don't sum to one are re-normalized.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
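# method='linear' treats the points as equally spaced and ignores the index,
# whereas method='time' interpolates using the actual spacing of a
# DatetimeIndex; the test below checks this by interpolating the ordinal day
# numbers of the same index.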
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
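# `limit` caps how many consecutive NaNs get filled and `limit_direction`
# picks which side of a gap is filled ('forward' by default, or 'backward'
# or 'both'). Illustrative example matching the series used below:
#     Series([1, 3, nan, nan, nan, 11]).interpolate(limit=2)
#     -> [1.0, 3.0, 5.0, 7.0, nan, 11.0]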
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
# These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
# These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
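# describe(percentiles=...) only accepts values in the interval [0, 1]; the
# median (0.5) is inserted automatically if it is not requested, which is why
# the explicit and implicit percentile lists below produce identical frames.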
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
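# By default describe() summarises only numeric columns; include/exclude take
# dtype selectors such as ['number'], ['float'], ['O'] (object), ['datetime'],
# ['category'] or 'all' to widen or narrow the selection, as exercised below.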
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index()  # the index order changes for mixed types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
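# The 'polynomial' and 'spline' interpolation methods delegate to scipy and
# require an explicit `order`; omitting it (or passing order=0 for splines)
# is expected to raise ValueError.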
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
#############################################################
# ActivitySim verification against TM1
# <NAME>, <EMAIL>, 02/22/19
# C:\projects\activitysim\verification>python compare_results.py
#############################################################
import pandas as pd
import openmatrix as omx
#############################################################
# INPUTS
#############################################################
pipeline_filename = 'asim/pipeline.h5'
distance_matrix_filename = "asim/skims.omx"
asim_nmtf_alts_filename = "asim/non_mandatory_tour_frequency_alternatives.csv"
process_sp = True # False skips work/school shadow pricing comparisons, True runs them
process_tm1 = True # False only processes asim, True processes tm1 as well
asim_sp_work_filename = "asim/shadow_price_workplace_modeled_size_10.csv"
asim_sp_school_filename = "asim/shadow_price_school_modeled_size_10.csv"
asim_sp_school_no_sp_filename = "asim/shadow_price_school_modeled_size_1.csv"
tm1_access_filename = "tm1/accessibility.csv"
tm1_sp_filename = "tm1/ShadowPricing_9.csv"
tm1_work_filename = "tm1/wsLocResults_1.csv"
tm1_ao_filename = "tm1/aoResults.csv"
tm1_hh_filename = "tm1/householdData_1.csv"
tm1_cdap_filename = "tm1/cdapResults.csv"
tm1_per_filename = "tm1/personData_1.csv"
tm1_tour_filename = "tm1/indivTourData_1.csv"
tm1_jtour_filename = "tm1/jointTourData_1.csv"
tm1_trips_filename = "tm1/indivTripData_1.csv"
tm1_jtrips_filename = "tm1/jointTripData_1.csv"
#############################################################
# OUTPUT FILES FOR DEBUGGING
#############################################################
asim_zones_filename = "asim/asim_zones.csv"
asim_access_filename = "asim/asim_access.csv"
asim_per_filename = "asim/asim_per.csv"
asim_hh_filename = "asim/asim_hh.csv"
asim_tour_filename = "asim/asim_tours.csv"
asim_trips_filename = "asim/asim_trips.csv"
#############################################################
# COMMON LABELS
#############################################################
ptypes = ["", "Full-time worker", "Part-time worker", "University student", "Non-worker",
"Retired", "Student of driving age", "Student of non-driving age",
"Child too young for school"]
mode_labels = ["", "DRIVEALONEFREE", "DRIVEALONEPAY", "SHARED2FREE", "SHARED2PAY", "SHARED3FREE",
"SHARED3PAY", "WALK", "BIKE", "WALK_LOC", "WALK_LRF", "WALK_EXP", "WALK_HVY",
"WALK_COM", "DRIVE_LOC", "DRIVE_LRF", "DRIVE_EXP", "DRIVE_HVY", "DRIVE_COM"]
#############################################################
# DISTANCE SKIM
#############################################################
# read distance matrix (DIST)
distmat = omx.open_file(distance_matrix_filename)["DIST"][:]
#############################################################
# EXPORT TABLES
#############################################################
# write tables for verification
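# Each table is pulled from the ActivitySim pipeline HDF5 store using keys of
# the form "<table>/<checkpoint>" (e.g. the households table as of the
# joint_tour_frequency step) and dumped to CSV so it can be joined and
# aggregated below.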
tazs = pd.read_hdf(pipeline_filename, "land_use/initialize_landuse")
tazs["zone"] = tazs.index
tazs.to_csv(asim_zones_filename, index=False)
access = pd.read_hdf(pipeline_filename, "accessibility/compute_accessibility")
access.to_csv(asim_access_filename, index=False)
hh = pd.read_hdf(pipeline_filename, "households/joint_tour_frequency")
hh["household_id"] = hh.index
hh.to_csv(asim_hh_filename, index=False)
per = pd.read_hdf(pipeline_filename, "persons/non_mandatory_tour_frequency")
per["person_id"] = per.index
per.to_csv(asim_per_filename, index=False)
tours = pd.read_hdf(pipeline_filename, "tours/stop_frequency")
tours["tour_id"] = tours.index
tours.to_csv(asim_tour_filename, index=False)
trips = pd.read_hdf(pipeline_filename, "trips/trip_mode_choice")
trips["trip_id"] = trips.index
trips.to_csv(asim_trips_filename, index=False)
#############################################################
# AGGREGATE
#############################################################
# accessibilities
if process_tm1:
tm1_access = pd.read_csv(tm1_access_filename)
tm1_access.to_csv("outputs/tm1_access.csv", na_rep=0)
asim_access = pd.read_csv(asim_access_filename)
asim_access.to_csv("outputs/asim_access.csv", na_rep=0)
#############################################################
# HOUSEHOLD AND PERSON
#############################################################
# work and school location
if process_sp:
if process_tm1:
tm1_markets = ["work_low", "work_med", "work_high", "work_high", "work_very high", "university",
"school_high", "school_grade"]
tm1 = pd.read_csv(tm1_sp_filename)
tm1 = tm1.groupby(tm1["zone"]).sum()
tm1["zone"] = tm1.index
tm1 = tm1.loc[tm1["zone"] > 0]
ws_size = tm1[["zone"]]
for i in range(len(tm1_markets)):
ws_size[tm1_markets[i] + "_modeledDests"] = tm1[tm1_markets[i] + "_modeledDests"]
ws_size.to_csv("outputs/tm1_work_school_location.csv", na_rep=0)
asim_markets = ["work_low", "work_med", "work_high", "work_high", "work_veryhigh", "university",
"highschool", "gradeschool"]
asim = pd.read_csv(asim_sp_work_filename)
asim_sch = pd.read_csv(asim_sp_school_filename)
asim_sch_no_sp = pd.read_csv(asim_sp_school_no_sp_filename)
asim_sch["gradeschool"] = asim_sch_no_sp["gradeschool"] # grade school not shadow priced
asim = asim.set_index("TAZ", drop=False)
asim_sch = asim_sch.set_index("TAZ", drop=False)
asim["gradeschool"] = asim_sch["gradeschool"].loc[asim["TAZ"]].tolist()
asim["highschool"] = asim_sch["highschool"].loc[asim["TAZ"]].tolist()
asim["university"] = asim_sch["university"].loc[asim["TAZ"]].tolist()
ws_size = asim[["TAZ"]]
for i in range(len(asim_markets)):
ws_size[asim_markets[i] + "_asim"] = asim[asim_markets[i]]
ws_size.to_csv("outputs/asim_work_school_location.csv", na_rep=0)
# work county to county flows
tazs = pd.read_csv(asim_zones_filename)
counties = ["", "SF", "SM", "SC", "ALA", "CC", "SOL", "NAP", "SON", "MAR"]
tazs["COUNTYNAME"] = pd.Series(counties)[tazs["county_id"].tolist()].tolist()
tazs = tazs.set_index("zone", drop=False)
if process_tm1:
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work["HomeCounty"] = tazs["COUNTYNAME"].loc[tm1_work["HomeTAZ"]].tolist()
tm1_work["WorkCounty"] = tazs["COUNTYNAME"].loc[tm1_work["WorkLocation"]].tolist()
tm1_work_counties = tm1_work.groupby(["HomeCounty", "WorkCounty"]).count()["HHID"]
tm1_work_counties = tm1_work_counties.reset_index()
tm1_work_counties = tm1_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
tm1_work_counties.to_csv("outputs/tm1_work_counties.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap["HomeCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["home_taz"]].tolist()
asim_cdap["WorkCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["workplace_zone_id"]].tolist()
asim_work_counties = asim_cdap.groupby(["HomeCounty", "WorkCounty"]).count()["household_id"]
asim_work_counties = asim_work_counties.reset_index()
asim_work_counties = asim_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
asim_work_counties.to_csv("outputs/asim_work_counties.csv", na_rep=0)
# auto ownership - count of hhs by num autos by taz
if process_tm1:
tm1_ao = pd.read_csv(tm1_ao_filename)
tm1_hh = pd.read_csv(tm1_hh_filename)
tm1_ao = tm1_ao.set_index("HHID", drop=False)
tm1_hh["ao"] = tm1_ao["AO"].loc[tm1_hh["hh_id"]].tolist()
tm1_autos = tm1_hh.groupby(["taz", "ao"]).count()["hh_id"]
tm1_autos = tm1_autos.reset_index()
tm1_autos = tm1_autos.pivot(index="taz", columns="ao")
tm1_autos.to_csv("outputs/tm1_autos.csv", na_rep=0)
asim_ao = pd.read_csv(asim_hh_filename)
asim_autos = asim_ao.groupby(["TAZ", "auto_ownership"]).count()["SERIALNO"]
asim_autos = asim_autos.reset_index()
asim_autos = asim_autos.pivot(index="TAZ", columns="auto_ownership")
asim_autos.to_csv("outputs/asim_autos.csv", na_rep=0)
# cdap - ptype count and ptype by M,N,H
if process_tm1:
tm1_cdap = pd.read_csv(tm1_cdap_filename)
tm1_cdap_sum = tm1_cdap.groupby(["PersonType", "ActivityString"]).count()["HHID"]
tm1_cdap_sum = tm1_cdap_sum.reset_index()
tm1_cdap_sum = tm1_cdap_sum.pivot(index="PersonType", columns="ActivityString")
tm1_cdap_sum.to_csv("outputs/tm1_cdap.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap_sum = asim_cdap.groupby(["ptype", "cdap_activity"]).count()["household_id"]
asim_cdap_sum = asim_cdap_sum.reset_index()
asim_cdap_sum = asim_cdap_sum.pivot(index="ptype", columns="cdap_activity")
asim_cdap_sum.to_csv("outputs/asim_cdap.csv", na_rep=0)
# free parking by ptype
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_per["fp_choice"] = (tm1_per["fp_choice"] == 1) # 1=free, 2==pay
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work = tm1_work.set_index("PersonID", drop=False)
tm1_per["WorkLocation"] = tm1_work["WorkLocation"].loc[tm1_per["person_id"]].tolist()
tm1_fp = tm1_per[tm1_per["WorkLocation"] > 0]
tm1_fp = tm1_fp.groupby(["type", "fp_choice"]).count()["hh_id"]
tm1_fp = tm1_fp.reset_index()
tm1_fp = tm1_fp.pivot(index="type", columns="fp_choice")
tm1_fp.to_csv("outputs/tm1_fp.csv", na_rep=0)
asim_cdap["ptypename"] = pd.Series(ptypes)[asim_cdap["ptype"].tolist()].tolist()
asim_fp = asim_cdap.groupby(["ptypename", "free_parking_at_work"]).count()["household_id"]
asim_fp = asim_fp.reset_index()
asim_fp = asim_fp.pivot(index="ptypename", columns="free_parking_at_work")
asim_fp.to_csv("outputs/asim_fp.csv", na_rep=0)
# value of time
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
tm1_per["vot_bin"] = pd.cut(tm1_per["value_of_time"], range(51))
tm1_per.groupby(["vot_bin"]).count()["hh_id"].to_csv("outputs/tm1_vot.csv", na_rep=0)
asim_per = pd.read_csv(asim_per_filename)
asim_per["vot_bin"] = pd.cut(asim_per["value_of_time"], range(51))
asim_per.groupby(["vot_bin"]).count()["household_id"].to_csv("outputs/asim_vot.csv", na_rep=0)
#############################################################
# TOUR
#############################################################
# indiv mandatory tour freq
tm1_imf_codes = ["", "0", "work1", "work2", "school1", "school2", "work_and_school"]
if process_tm1:
tm1_per = pd.read_csv(tm1_per_filename)
from datetime import timedelta
from functools import partial
from itertools import permutations
import dask.bag as db
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from kartothek.core.cube.conditions import (
C,
Conjunction,
EqualityCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
InIntervalCondition,
IsInCondition,
LessEqualCondition,
LessThanCondition,
)
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import build_cube_from_bag
from kartothek.io.eager import build_dataset_indices
from kartothek.io.eager_cube import append_to_cube, build_cube, remove_partitions
__all__ = (
"apply_condition_unsafe",
"data_no_part",
"fullrange_cube",
"fullrange_data",
"fullrange_df",
"massive_partitions_cube",
"massive_partitions_data",
"massive_partitions_df",
"multipartition_cube",
"multipartition_df",
"no_part_cube",
"no_part_df",
"other_part_cube",
"sparse_outer_cube",
"sparse_outer_data",
"sparse_outer_df",
"sparse_outer_opt_cube",
"sparse_outer_opt_df",
"test_complete",
"test_condition",
"test_condition_on_null",
"test_cube",
"test_delayed_index_build_correction_restriction",
"test_delayed_index_build_partition_by",
"test_df",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_empty_dimension_columns",
"test_fail_missing_condition_columns",
"test_fail_missing_dimension_columns",
"test_fail_missing_partition_by",
"test_fail_missing_payload_columns",
"test_fail_no_store_factory",
"test_fail_projection",
"test_fail_unindexed_partition_by",
"test_fail_unstable_dimension_columns",
"test_fail_unstable_partition_by",
"test_filter_select",
"test_hypothesis",
"test_overlay_tricky",
"test_partition_by",
"test_projection",
"test_select",
"test_simple_roundtrip",
"test_sort",
"test_stresstest_index_select_row",
"test_wrong_condition_type",
"testset",
"updated_cube",
"updated_df",
)
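# Fixture data layout: the "seed" dataset defines the cube cells over the
# dimension columns (x, y, z) and physical partition columns (p, q), while the
# "enrich_*" datasets join additional value columns (v2, v3) and index columns
# (i2, i3) onto those cells, some of them on a subset of the dimensions only.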
@pytest.fixture(scope="module")
def fullrange_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{
"y": [0, 1, 2, 3, 0, 1, 2, 3],
"z": 0,
"p": [0, 0, 1, 1, 0, 0, 1, 1],
"q": [0, 0, 0, 0, 1, 1, 1, 1],
"v3": np.arange(8),
"i3": np.arange(8),
}
),
}
@pytest.fixture(scope="module")
def fullrange_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="fullrange_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=fullrange_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def multipartition_cube(module_store, fullrange_data, fullrange_cube):
def _gen(part):
result = {}
for dataset_id, df in fullrange_data.items():
df = df.copy()
df["z"] = part
result[dataset_id] = df
return result
cube = fullrange_cube.copy(uuid_prefix="multipartition_cube")
build_cube_from_bag(
data=db.from_sequence([0, 1], partition_size=1).map(_gen),
store=module_store,
cube=cube,
ktk_cube_dataset_ids=["seed", "enrich_dense", "enrich_sparse"],
).compute()
return cube
@pytest.fixture(scope="module")
def sparse_outer_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"i1": [0, 3, 7],
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 0],
"y": [0, 1],
"z": 0,
"p": [0, 2],
"q": 0,
"v2": [0, 7],
"i2": [0, 7],
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 0], "z": 0, "p": [0, 1], "q": 0, "v3": [0, 3], "i3": [0, 3]}
),
}
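# The sparse_outer fixtures intentionally leave some seed cells without a
# matching row in the enrich datasets, so the expected query result
# (sparse_outer_df below) contains NaNs for v2/i2 and v3/i3.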
@pytest.fixture(scope="module")
def sparse_outer_cube(module_store, sparse_outer_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="sparse_outer_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=sparse_outer_data, store=module_store, cube=cube)
return cube
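# sparse_outer_opt_cube rebuilds the same data with several columns cast to
# narrower dtypes (mirroring sparse_outer_opt_df), presumably so the query
# tests can check that those dtypes survive the build/load round trip.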
@pytest.fixture(scope="module")
def sparse_outer_opt_cube(
module_store,
sparse_outer_data,
sparse_outer_cube,
sparse_outer_df,
sparse_outer_opt_df,
):
data = {}
for dataset_id in sparse_outer_data.keys():
df = sparse_outer_data[dataset_id].copy()
for col in sparse_outer_opt_df.columns:
if col in df.columns:
dtype = sparse_outer_opt_df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
data[dataset_id] = df
cube = sparse_outer_cube.copy(uuid_prefix="sparse_outer_opt_cube")
build_cube(data=data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def massive_partitions_data():
n = 17
return {
"seed": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"i1": np.arange(n),
}
),
"enrich_1": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v2": np.arange(n),
"i2": np.arange(n),
}
),
"enrich_2": pd.DataFrame(
{
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v3": np.arange(n),
"i3": np.arange(n),
}
),
}
@pytest.fixture(scope="module")
def massive_partitions_cube(module_store, massive_partitions_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="massive_partitions_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=massive_partitions_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def fullrange_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def multipartition_df(fullrange_df):
dfs = []
for z in (0, 1):
df = fullrange_df.copy()
df["z"] = z
dfs.append(df)
return (
pd.concat(dfs, ignore_index=True)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"v2": [0, np.nan, 7],
"v3": [0, 3, np.nan],
"i1": [0, 3, 7],
"i2": [0, np.nan, 7],
"i3": [0, 3, np.nan],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_opt_df(sparse_outer_df):
df = sparse_outer_df.copy()
df["x"] = df["x"].astype(np.int16)
df["y"] = df["y"].astype(np.int32)
df["z"] = df["z"].astype(np.int8)
df["v1"] = df["v1"].astype(np.int8)
df["i1"] = df["i1"].astype(np.int8)
return df
@pytest.fixture(scope="module")
def massive_partitions_df():
n = 17
return (
pd.DataFrame(
data={
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"v2": np.arange(n),
"v3": np.arange(n),
"i1": np.arange(n),
"i2": np.arange(n),
"i3": np.arange(n),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def updated_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="updated_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"i1": np.arange(6),
}
),
"enrich": pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v2": np.arange(6),
"i2": np.arange(6),
}
),
"extra": pd.DataFrame(
{
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v3": np.arange(6),
"i3": np.arange(6),
}
),
},
store=module_store,
cube=cube,
)
remove_partitions(
cube=cube,
store=module_store,
ktk_cube_dataset_ids=["enrich"],
conditions=C("p") >= 1,
)
append_to_cube(
data={
"enrich": pd.DataFrame(
{
"x": [1, 1],
"y": [0, 1],
"z": 0,
"p": [1, 1],
"q": 0,
"v2": [7, 8],
"i2": [7, 8],
}
)
},
store=module_store,
cube=cube,
)
return cube
@pytest.fixture(scope="module")
def updated_df():
return (
pd.DataFrame(
data={
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"v2": [0, 1, 7, 8, np.nan, np.nan],
"v3": np.arange(6),
"i1": np.arange(6),
"i2": [0, 1, 7, 8, np.nan, np.nan],
"i3": np.arange(6),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def data_no_part():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 1, 2, 3], "z": 0, "v3": np.arange(4), "i3": np.arange(4)}
),
}
@pytest.fixture(scope="module")
def no_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="data_no_part",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": [], "enrich_sparse": []},
)
return cube
@pytest.fixture(scope="module")
def other_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="other_part_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": ["i2"], "enrich_sparse": ["i3"]},
)
return cube
@pytest.fixture(scope="module")
def no_part_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(
params=[
"fullrange",
"multipartition",
"sparse_outer",
"sparse_outer_opt",
"massive_partitions",
"updated",
"no_part",
"other_part",
],
scope="module",
)
def testset(request):
return request.param
@pytest.fixture(scope="module")
def test_cube(
testset,
fullrange_cube,
multipartition_cube,
sparse_outer_cube,
sparse_outer_opt_cube,
massive_partitions_cube,
updated_cube,
no_part_cube,
other_part_cube,
):
if testset == "fullrange":
return fullrange_cube
elif testset == "multipartition":
return multipartition_cube
elif testset == "sparse_outer":
return sparse_outer_cube
elif testset == "sparse_outer_opt":
return sparse_outer_opt_cube
elif testset == "massive_partitions":
return massive_partitions_cube
elif testset == "updated":
return updated_cube
elif testset == "no_part":
return no_part_cube
elif testset == "other_part":
return other_part_cube
else:
raise ValueError("Unknown param {}".format(testset))
@pytest.fixture(scope="module")
def test_df(
testset,
fullrange_df,
multipartition_df,
sparse_outer_df,
sparse_outer_opt_df,
massive_partitions_df,
updated_df,
no_part_df,
):
if testset == "fullrange":
return fullrange_df
elif testset == "multipartition":
return multipartition_df
elif testset == "sparse_outer":
return sparse_outer_df
elif testset == "sparse_outer_opt":
return sparse_outer_opt_df
elif testset == "massive_partitions":
return massive_partitions_df
elif testset == "updated":
return updated_df
elif testset in ("no_part", "other_part"):
return no_part_df
else:
raise ValueError("Unknown param {}".format(testset))
def test_simple_roundtrip(driver, function_store, function_store_rwro):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(data=df, cube=cube, store=function_store)
result = driver(cube=cube, store=function_store_rwro)
assert len(result) == 1
df_actual = result[0]
df_expected = df.reindex(columns=["p", "v", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_complete(driver, module_store, test_cube, test_df):
result = driver(cube=test_cube, store=module_store)
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, test_df)
def apply_condition_unsafe(df, cond):
# For the sparse_outer testset, the test_df has the wrong datatype because we cannot encode missing integer data in
# pandas.
#
# The condition will not be applicable to the DF because the DF has floats while conditions have ints. We fix that
# by modifying the condition.
#
# In case there is no missing data because of the right conditions, kartothek will return integer data.
# assert_frame_equal will then complain about this. So in case there is no missing data, let's recover the correct
# dtype here.
if not isinstance(cond, Conjunction):
cond = Conjunction(cond)
float_cols = {col for col in df.columns if df[col].dtype == float}
# convert int to float conditions
cond2 = Conjunction([])
for col, conj in cond.split_by_column().items():
if col in float_cols:
parts = []
for part in conj.conditions:
if isinstance(part, IsInCondition):
part = IsInCondition(
column=part.column, value=tuple((float(v) for v in part.value))
)
elif isinstance(part, InIntervalCondition):
part = InIntervalCondition(
column=part.column,
start=float(part.start),
stop=float(part.stop),
)
else:
part = part.__class__(column=part.column, value=float(part.value))
parts.append(part)
conj = Conjunction(parts)
cond2 &= conj
# apply conditions
df = cond2.filter_df(df).reset_index(drop=True)
# convert float columns to int columns
for col in df.columns:
if df[col].notnull().all():
dtype = df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
return df
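# Illustrative note (added; not part of the original test module): for the sparse_outer
# testset a parametrized condition such as C("v2") >= 7 is first widened to the float
# domain, C("v2") >= 7.0, so it can be applied to the NaN-padded expected frame, and any
# column left without missing values is then cast back (float64 -> int64, etc.) to match
# the integer data kartothek returns.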
@pytest.mark.parametrize(
"cond",
[
C("v1") >= 7,
C("v1") >= 10000,
C("v2") >= 7,
C("v3") >= 3,
C("i1") >= 7,
C("i1") >= 10000,
C("i2") >= 7,
C("i2") != 0,
C("i3") >= 3,
C("p") >= 1,
C("q") >= 1,
C("x") >= 1,
C("y") >= 1,
(C("x") == 3) & (C("y") == 3),
(C("i1") > 0) & (C("i2") > 0),
Conjunction([]),
],
)
def test_condition(driver, module_store, test_cube, test_df, cond):
result = driver(cube=test_cube, store=module_store, conditions=cond)
df_expected = apply_condition_unsafe(test_df, cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["v1", "v2"], ["v2", "v3"], ["v3"]])
def test_select(driver, module_store, test_cube, test_df, payload_columns):
result = driver(cube=test_cube, store=module_store, payload_columns=payload_columns)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
:, sorted(set(payload_columns) | {"x", "y", "z", "p", "q"})
]
pdt.assert_frame_equal(df_actual, df_expected)
def test_filter_select(driver, module_store, test_cube, test_df):
result = driver(
cube=test_cube,
store=module_store,
payload_columns=["v1", "v2"],
conditions=(C("i3") >= 3), # completely unrelated to the payload
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
test_df["i3"] >= 3, ["p", "q", "v1", "v2", "x", "y", "z"]
].reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize(
"partition_by",
[["i1"], ["i2"], ["i3"], ["x"], ["y"], ["p"], ["q"], ["i1", "i2"], ["x", "y"]],
)
def test_partition_by(driver, module_store, test_cube, test_df, partition_by):
dfs_actual = driver(cube=test_cube, store=module_store, partition_by=partition_by)
dfs_expected = [
df_g.reset_index(drop=True)
for g, df_g in test_df.groupby(partition_by, sort=True)
]
for df_expected in dfs_expected:
for col in df_expected.columns:
if df_expected[col].dtype == float:
try:
df_expected[col] = df_expected[col].astype(int)
except Exception:
pass
assert len(dfs_actual) == len(dfs_expected)
for df_actual, df_expected in zip(dfs_actual, dfs_expected):
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("dimension_columns", list(permutations(["x", "y", "z"])))
def test_sort(driver, module_store, test_cube, test_df, dimension_columns):
result = driver(
cube=test_cube, store=module_store, dimension_columns=dimension_columns
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.sort_values(
list(dimension_columns) + list(test_cube.partition_columns)
).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["y", "z"], ["y", "z", "v3"]])
def test_projection(driver, module_store, test_cube, test_df, payload_columns):
result = driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=payload_columns,
)
assert len(result) == 1
df_actual = result[0]
df_expected = (
test_df.loc[:, sorted(set(payload_columns) | {"y", "z", "p", "q"})]
.drop_duplicates()
.sort_values(["y", "z", "p", "q"])
.reset_index(drop=True)
)
pdt.assert_frame_equal(df_actual, df_expected)
def test_stresstest_index_select_row(driver, function_store):
n_indices = 100
n_rows = 1000
data = {"x": np.arange(n_rows), "p": 0}
for i in range(n_indices):
data["i{}".format(i)] = np.arange(n_rows)
df = pd.DataFrame(data)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
index_columns=["i{}".format(i) for i in range(n_indices)],
)
build_cube(data=df, cube=cube, store=function_store)
conditions = Conjunction([(C("i{}".format(i)) == 0) for i in range(n_indices)])
result = driver(
cube=cube,
store=function_store,
conditions=conditions,
payload_columns=["p", "x"],
)
assert len(result) == 1
df_actual = result[0]
df_expected = df.loc[df["x"] == 0].reindex(columns=["p", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_fail_missing_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=["x", "a", "b"])
assert (
"Following dimension columns were requested but are missing from the cube: a, b"
in str(exc.value)
)
def test_fail_empty_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=[])
assert "Dimension columns cannot be empty." in str(exc.value)
def test_fail_missing_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["foo"])
assert (
"Following partition-by columns were requested but are missing from the cube: foo"
in str(exc.value)
)
def test_fail_unindexed_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["v1", "v2"])
assert (
"Following partition-by columns are not indexed and cannot be used: v1, v2"
in str(exc.value)
)
def test_fail_missing_condition_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
conditions=(C("foo") == 1) & (C("bar") == 2),
)
assert (
"Following condition columns are required but are missing from the cube: bar, foo"
in str(exc.value)
)
def test_fail_missing_payload_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, payload_columns=["foo", "bar"])
assert "Cannot find the following requested payload columns: bar, foo" in str(
exc.value
)
def test_fail_projection(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=["v1"],
)
assert (
'Cannot project dataset "seed" with dimensionality [x, y, z] to [y, z] '
"while keeping the following payload intact: v1" in str(exc.value)
)
def test_fail_unstable_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_fail_unstable_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, partition_by={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_wrong_condition_type(driver, function_store, driver_name):
types = {
"int": pd.Series([-1], dtype=np.int64),
"uint": pd.Series([1], dtype=np.uint64),
"float": pd.Series([1.3], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"str": pd.Series(["foo"], dtype=object),
}
cube = Cube(
dimension_columns=["d_{}".format(t) for t in sorted(types.keys())],
partition_columns=["p_{}".format(t) for t in sorted(types.keys())],
uuid_prefix="typed_cube",
index_columns=["i_{}".format(t) for t in sorted(types.keys())],
)
data = {
"seed": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "v1"]
}
),
"enrich": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v2"]
}
),
}
build_cube(data=data, store=function_store, cube=cube)
df = pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v1", "v2"]
}
)
for col in df.columns:
t1 = col.split("_")[1]
for t2 in sorted(types.keys()):
cond = C(col) == types[t2].values[0]
if t1 == t2:
result = driver(cube=cube, store=function_store, conditions=cond)
assert len(result) == 1
df_actual = result[0]
df_expected = cond.filter_df(df).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected, check_like=True)
else:
with pytest.raises(TypeError) as exc:
driver(cube=cube, store=function_store, conditions=cond)
assert "has wrong type" in str(exc.value)
def test_condition_on_null(driver, function_store):
df = pd.DataFrame(
{
"x": pd.Series([0, 1, 2], dtype=np.int64),
"p": | pd.Series([0, 0, 1], dtype=np.int64) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from surprise.model_selection import train_test_split
from surprise import KNNBasic, accuracy
from surprise import Dataset, Reader
from surprise.dump import dump
from recmetrics import rmse, mse, mark, mark_plot
from os.path import join, split
from random import sample
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import *
# In[2]:
_HERE = '' # split(__file__)[0]
# # Read dataset
# In[ ]:
ratings = ratings_df()
ratings = ratings.query('rating >=3')
ratings = ratings.sample(n=1000)
ratings.reset_index(drop=True, inplace=True)
ratings.head()
# # k-NN Model training
# In[ ]:
reader = Reader(rating_scale=(0, 5))
data = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader=reader)
trainset, testset = train_test_split(data, test_size=.25)
# In[ ]:
sim_options = {'name': 'cosine',
'user_based': False # compute similarities between items
}
algo = KNNBasic(k=2, sim_options=sim_options, verbose=True)
# In[ ]:
algo.fit(trainset)
# In[ ]:
testset[0]
# In[ ]:
# In[ ]:
preds = algo.test(testset, verbose=False)
# In[ ]:
preds = pd.DataFrame(preds)
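# In[ ]:
# Possible evaluation step (a sketch; the notebook is truncated here): the recmetrics
# imports above suggest scoring the k-NN output, where surprise's predictions expose the
# true rating as r_ui and the estimate as est.
# print("RMSE:", rmse(preds.r_ui, preds.est))
# print("MSE: ", mse(preds.r_ui, preds.est))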
import operator
from operator import methodcaller
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
pytestmark = pytest.mark.pandas
def test_table_column(t, df):
expr = t.plain_int64
result = expr.execute()
expected = df.plain_int64
tm.assert_series_equal(result, expected)
def test_literal(client):
assert client.execute(ibis.literal(1)) == 1
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df')
def test_selection(t, df):
expr = t[
((t.plain_strings == 'a') | (t.plain_int64 == 3))
& (t.dup_strings == 'd')
]
result = expr.execute()
expected = df[
((df.plain_strings == 'a') | (df.plain_int64 == 3))
& (df.dup_strings == 'd')
].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate(t, df):
expr = t.mutate(x=t.plain_int64 + 1, y=t.plain_int64 * 2)
result = expr.execute()
expected = df.assign(x=df.plain_int64 + 1, y=df.plain_int64 * 2)
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_scope_does_not_override(t, df):
col = t.plain_int64
expr = t[
[
col.name('new_col'),
col.sum()
.over(ibis.window(group_by='dup_strings'))
.name('grouped'),
]
]
result = expr.execute()
expected = pd.concat(
[
df[['plain_int64', 'dup_strings']].rename(
columns={'plain_int64': 'new_col'}
),
df.groupby('dup_strings')
.plain_int64.transform('sum')
.reset_index(drop=True)
.rename('grouped'),
],
axis=1,
)[['new_col', 'grouped']]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'where',
[
lambda t: None,
lambda t: t.dup_strings == 'd',
lambda t: (t.dup_strings == 'd') | (t.plain_int64 < 100),
],
)
@pytest.mark.parametrize(
('ibis_func', 'pandas_func'),
[
(methodcaller('abs'), np.abs),
(methodcaller('ceil'), np.ceil),
(methodcaller('exp'), np.exp),
(methodcaller('floor'), np.floor),
(methodcaller('ln'), np.log),
(methodcaller('log10'), np.log10),
(methodcaller('log', 2), lambda x: np.log(x) / np.log(2)),
(methodcaller('log2'), np.log2),
(methodcaller('round', 0), lambda x: x.round(0).astype('int64')),
(methodcaller('round', -2), methodcaller('round', -2)),
(methodcaller('round', 2), methodcaller('round', 2)),
(methodcaller('round'), lambda x: x.round().astype('int64')),
(methodcaller('sign'), np.sign),
(methodcaller('sqrt'), np.sqrt),
],
)
def test_aggregation_group_by(t, df, where, ibis_func, pandas_func):
ibis_where = where(t)
expr = t.group_by(t.dup_strings).aggregate(
avg_plain_int64=t.plain_int64.mean(where=ibis_where),
sum_plain_float64=t.plain_float64.sum(where=ibis_where),
mean_float64_positive=ibis_func(t.float64_positive).mean(
where=ibis_where
),
neg_mean_int64_with_zeros=(-t.int64_with_zeros).mean(where=ibis_where),
nunique_dup_ints=t.dup_ints.nunique(),
)
result = expr.execute()
pandas_where = where(df)
mask = slice(None) if pandas_where is None else pandas_where
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_int64': lambda x, mask=mask: x[mask].mean(),
'plain_float64': lambda x, mask=mask: x[mask].sum(),
'dup_ints': 'nunique',
'float64_positive': (
lambda x, mask=mask, func=pandas_func: func(x[mask]).mean()
),
'int64_with_zeros': lambda x, mask=mask: (-x[mask]).mean(),
}
)
.reset_index()
.rename(
columns={
'plain_int64': 'avg_plain_int64',
'plain_float64': 'sum_plain_float64',
'dup_ints': 'nunique_dup_ints',
'float64_positive': 'mean_float64_positive',
'int64_with_zeros': 'neg_mean_int64_with_zeros',
}
)
)
# TODO(phillipc): Why does pandas not return floating point values here?
expected['avg_plain_int64'] = expected.avg_plain_int64.astype('float64')
result['avg_plain_int64'] = result.avg_plain_int64.astype('float64')
expected[
'neg_mean_int64_with_zeros'
] = expected.neg_mean_int64_with_zeros.astype('float64')
result[
'neg_mean_int64_with_zeros'
] = result.neg_mean_int64_with_zeros.astype('float64')
expected['mean_float64_positive'] = expected.mean_float64_positive.astype(
'float64'
)
result['mean_float64_positive'] = result.mean_float64_positive.astype(
'float64'
)
lhs = result[expected.columns]
rhs = expected
tm.assert_frame_equal(lhs, rhs)
def test_aggregation_without_group_by(t, df):
expr = t.aggregate(
avg_plain_int64=t.plain_int64.mean(),
sum_plain_float64=t.plain_float64.sum(),
)
result = expr.execute()[['avg_plain_int64', 'sum_plain_float64']]
new_names = {
'plain_float64': 'sum_plain_float64',
'plain_int64': 'avg_plain_int64',
}
expected = (
pd.Series(
[df['plain_int64'].mean(), df['plain_float64'].sum()],
index=['plain_int64', 'plain_float64'],
)
.to_frame()
.T.rename(columns=new_names)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_with_having(t, df):
expr = (
t.group_by(t.dup_strings)
.having(t.plain_float64.sum() == 5)
.aggregate(avg_a=t.plain_int64.mean(), sum_c=t.plain_float64.sum())
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_int64': 'mean', 'plain_float64': 'sum'})
.reset_index()
.rename(columns={'plain_int64': 'avg_a', 'plain_float64': 'sum_c'})
)
expected = expected.loc[expected.sum_c == 5, ['avg_a', 'sum_c']]
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_rename_key(t, df):
expr = t.groupby(t.dup_strings.name('foo')).aggregate(
dup_string_count=t.dup_strings.count()
)
assert 'foo' in expr.schema()
result = expr.execute()
assert 'foo' in result.columns
expected = (
df.groupby('dup_strings')
.dup_strings.count()
.rename('dup_string_count')
.reset_index()
.rename(columns={'dup_strings': 'foo'})
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('reduction', ['mean', 'sum', 'count', 'std', 'var'])
@pytest.mark.parametrize(
'where',
[
lambda t: (t.plain_strings == 'a') | (t.plain_strings == 'c'),
lambda t: (t.dup_strings == 'd')
& ((t.plain_int64 == 1) | (t.plain_int64 == 3)),
lambda t: None,
],
)
def test_reduction(t, df, reduction, where):
func = getattr(t.plain_int64, reduction)
mask = where(t)
expr = func(where=mask)
result = expr.execute()
df_mask = where(df)
expected_func = getattr(
df.loc[df_mask if df_mask is not None else slice(None), 'plain_int64'],
reduction,
)
expected = expected_func()
assert result == expected
@pytest.mark.parametrize(
'reduction',
[
lambda x: x.any(),
lambda x: x.all(),
lambda x: ~(x.any()),
lambda x: ~(x.all()),
],
)
def test_boolean_aggregation(t, df, reduction):
expr = reduction(t.plain_int64 == 1)
result = expr.execute()
expected = reduction(df.plain_int64 == 1)
assert result == expected
@pytest.mark.parametrize('column', ['float64_with_zeros', 'int64_with_zeros'])
def test_null_if_zero(t, df, column):
expr = t[column].nullifzero()
result = expr.execute()
expected = df[column].replace(0, np.nan)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('left', 'right', 'expected', 'compare'),
[
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(1),
lambda df: np.nan,
np.testing.assert_array_equal, # treats NaNs as equal
id='literal_literal_equal',
),
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(2),
lambda df: 1,
np.testing.assert_equal,
id='literal_literal_not_equal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: ibis.literal('a'),
lambda df: df.dup_strings.where(df.dup_strings != 'a'),
tm.assert_series_equal,
id='series_literal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: t.dup_strings,
lambda df: df.dup_strings.where(df.dup_strings != df.dup_strings),
tm.assert_series_equal,
id='series_series',
),
pytest.param(
lambda t: ibis.literal('a'),
lambda t: t.dup_strings,
lambda df: pd.Series(
np.where(df.dup_strings == 'a', np.nan, 'a'), index=df.index
),
tm.assert_series_equal,
id='literal_series',
),
],
)
def test_nullif(t, df, left, right, expected, compare):
expr = left(t).nullif(right(t))
result = execute(expr)
compare(result, expected(df))
def test_nullif_inf():
df = pd.DataFrame({'a': [np.inf, 3.14, -np.inf, 42.0]})
con = connect({'t': df})
t = con.table('t')
expr = t.a.nullif(np.inf).nullif(-np.inf)
result = expr.execute()
expected = pd.Series([np.nan, 3.14, np.nan, 42.0], name='a')
tm.assert_series_equal(result, expected)
def test_group_concat(t, df):
expr = t.groupby(t.dup_strings).aggregate(
foo=t.plain_int64.group_concat(',')
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(lambda df: ','.join(df.plain_int64.astype(str)))
.reset_index()
.rename(columns={0: 'foo'})
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize('offset', [0, 2])
def test_frame_limit(t, df, offset):
n = 5
df_expr = t.limit(n, offset=offset)
result = df_expr.execute()
expected = df.iloc[offset : offset + n].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.xfail(
raises=AttributeError, reason='TableColumn does not implement limit'
)
@pytest.mark.parametrize('offset', [0, 2])
def test_series_limit(t, df, offset):
n = 5
s_expr = t.plain_int64.limit(n, offset=offset)
result = s_expr.execute()
tm.assert_series_equal(result, df.plain_int64.iloc[offset : offset + n])
@pytest.mark.parametrize(
('key', 'pandas_by', 'pandas_ascending'),
[
(lambda t, col: [ibis.desc(t[col])], lambda col: [col], False),
(
lambda t, col: [t[col], ibis.desc(t.plain_int64)],
lambda col: [col, 'plain_int64'],
[True, False],
),
(
lambda t, col: [ibis.desc(t.plain_int64 * 2)],
lambda col: ['plain_int64'],
False,
),
],
)
@pytest.mark.parametrize(
'column',
['plain_datetimes_naive', 'plain_datetimes_ny', 'plain_datetimes_utc'],
)
def test_sort_by(t, df, column, key, pandas_by, pandas_ascending):
expr = t.sort_by(key(t, column))
result = expr.execute()
expected = df.sort_values(
pandas_by(column), ascending=pandas_ascending
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_complex_sort_by(t, df):
expr = t.sort_by(
[ibis.desc(t.plain_int64 * t.plain_float64), t.plain_float64]
)
result = expr.execute()
expected = (
df.assign(foo=df.plain_int64 * df.plain_float64)
.sort_values(['foo', 'plain_float64'], ascending=[False, True])
.drop(['foo'], axis=1)
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_distinct(t, df):
expr = t.dup_strings.distinct()
result = expr.execute()
expected = pd.Series(df.dup_strings.unique(), name='dup_strings')
tm.assert_series_equal(result, expected)
def test_count_distinct(t, df):
expr = t.dup_strings.nunique()
result = expr.execute()
expected = df.dup_strings.nunique()
assert result == expected
def test_value_counts(t, df):
expr = t.dup_strings.value_counts()
result = expr.execute()
expected = (
df.dup_strings.value_counts()
.reset_index()
.rename(columns={'dup_strings': 'count'})
.rename(columns={'index': 'dup_strings'})
.sort_values(['dup_strings'])
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_table_count(t, df):
expr = t.count()
result = expr.execute()
expected = len(df)
assert result == expected
def test_weighted_average(t, df):
expr = t.groupby(t.dup_strings).aggregate(
avg=(t.plain_float64 * t.plain_int64).sum() / t.plain_int64.sum()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(
lambda df: (df.plain_int64 * df.plain_float64).sum()
/ df.plain_int64.sum()
)
.reset_index()
.rename(columns={0: 'avg'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_multiple_keys(t, df):
expr = t.groupby([t.dup_strings, t.dup_ints]).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
result = expr.execute()
expected = (
df.groupby(['dup_strings', 'dup_ints'])
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate_after_group_by(t, df):
gb = t.groupby(t.dup_strings).aggregate(
avg_plain_float64=t.plain_float64.mean()
)
expr = gb.mutate(x=gb.avg_plain_float64)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_float64': 'mean'})
.reset_index()
.rename(columns={'plain_float64': 'avg_plain_float64'})
)
expected = expected.assign(x=expected.avg_plain_float64)
tm.assert_frame_equal(result[expected.columns], expected)
def test_groupby_with_unnamed_arithmetic(t, df):
expr = t.groupby(t.dup_strings).aggregate(
naive_variance=(
(t.plain_float64 ** 2).sum() - t.plain_float64.mean() ** 2
)
/ t.plain_float64.count()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_float64': lambda x: ((x ** 2).sum() - x.mean() ** 2)
/ x.count()
}
)
.reset_index()
.rename(columns={'plain_float64': 'naive_variance'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_isnull(t, df):
expr = t.strings_with_nulls.isnull()
result = expr.execute()
expected = df.strings_with_nulls.isnull()
tm.assert_series_equal(result, expected)
def test_notnull(t, df):
expr = t.strings_with_nulls.notnull()
result = expr.execute()
expected = df.strings_with_nulls.notnull()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('raw_value', [0.0, 1.0])
def test_scalar_parameter(t, df, raw_value):
value = ibis.param(dt.double)
expr = t.float64_with_zeros == value
result = expr.execute(params={value: raw_value})
expected = df.float64_with_zeros == raw_value
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('elements', [[1], (1,), {1}, frozenset({1})])
def test_isin(t, df, elements):
expr = t.plain_float64.isin(elements)
expected = df.plain_float64.isin(elements)
result = expr.execute()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('elements', [[1], (1,), {1}, frozenset({1})])
def test_notin(t, df, elements):
expr = t.plain_float64.notin(elements)
expected = ~df.plain_float64.isin(elements)
result = expr.execute()
tm.assert_series_equal(result, expected)
def test_cast_on_group_by(t, df):
expr = t.groupby(t.dup_strings).aggregate(
casted=(t.float64_with_zeros == 0).cast('int64').sum()
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.float64_with_zeros.apply(lambda s: (s == 0).astype('int64').sum())
.reset_index()
.rename(columns={'float64_with_zeros': 'casted'})
)
tm.assert_frame_equal(result, expected)
import pandas as pd
import numpy as np
from shapely import wkt
import pytest
from tenzing.core.model_implementations.typesets import tenzing_standard
_test_suite = [
pd.Series([1, 2, 3], name='int_series'),
pd.Series([1, 2, 3], name='categorical_int_series', dtype='category'),
pd.Series([1, 2, np.nan], name='int_nan_series'),
pd.Series([1.0, 2.1, 3.0], name='float_series'),
pd.Series([1.0, 2.5, np.nan], name='float_nan_series'),
pd.Series([1.0, 2.0, 3.1], dtype='category', name='categorical_float_series'),
pd.Series(['hello', 'world'], name='string_series'),
pd.Series(['hello', 'world'], dtype='category', name='categorical_string_series'),
pd.Series(['2017-10-01', '12/05/2017'], name='timestamp_string_series'),
pd.Series([True, False], name='bool_series'),
pd.Series([np.complex(0, 0), np.complex(1, 2), np.complex(3, -1), np.nan], name='complex_series'),
pd.Series([np.complex(0, 0), np.complex(1, 2), np.complex(3, -1), np.nan], name='categorical_complex_series',
dtype='category'),
pd.Series([pd.datetime(2017, 3, 5),
"""
Managing csv files with data type(float, str) validator
"""
import csv
import logging
import sys
from os import listdir
from os.path import isfile, join
import pandas as pd
class CSVManager:
"""
CSVManager makes it easy to handle CSV files with validation.
"""
csv_path: str
csv_files: list
logger = logging.getLogger('Importer')
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def __init__(self, current_path: str):
self.csv_path = current_path
self.logger.setLevel(logging.DEBUG)
self.handler.setLevel(logging.INFO)
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
def get_all_data_as_list(self):
"""
Getting all the csv data as a list.
:return: all the csv content in a list.
"""
return self.read_all_csv()
def get_csv_files(self): # pragma: no cover
"""
Get the list of csv file names
:return: list of csv file names
"""
module_path = self.csv_path
csv_files = [f for f in listdir(module_path) if isfile(join(module_path, f))
and ".csv" in f]
self.csv_files = list(csv_files)
return csv_files
def check_csv_for_diff_types(self, csv_reader: list):
"""
Checking for different types in rows
NOTE: only checking str and float types (no datetime and such)
NOTE2: sale/incoming can be used as a boolean type
:param csv_reader: csv file content
:type csv_reader: list
:return: Bool
"""
self.logger.debug(csv_reader)
# read content by rows without header
dataframe = pd.DataFrame(csv_reader)
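# Sketch of how the check could continue (an assumption -- the original method body is
# truncated here): look for columns that mix str and float values.
# for column in dataframe.columns:
#     kinds = {type(value) for value in dataframe[column].dropna()}
#     if len(kinds) > 1:
#         self.logger.error("Mixed types in column %s: %s", column, kinds)
#         return False
# return True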
# MIT License
#
# Copyright (c) 2018 Capital One Services, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import filecmp
import os
import shutil
from pathlib import Path
import boto3
import numpy as np
import pandas as pd
import pytest
import snowflake.connector
import locopy
DBAPIS = [snowflake.connector]
INTEGRATION_CREDS = str(Path.home()) + os.sep + ".locopy-sfrc"
S3_BUCKET = "locopy-integration-testing"
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_FILE = os.path.join(CURR_DIR, "data", "mock_file.txt")
LOCAL_FILE_JSON = os.path.join(CURR_DIR, "data", "mock_file.json")
LOCAL_FILE_DL = os.path.join(CURR_DIR, "data", "mock_file_dl.txt")
TEST_DF = pd.read_csv(os.path.join(CURR_DIR, "data", "mock_dataframe.txt"), sep=",")
TEST_DF_2 = pd.read_csv(os.path.join(CURR_DIR, "data", "mock_dataframe_2.txt"), sep=",")
CREDS_DICT = locopy.utility.read_config_yaml(INTEGRATION_CREDS)
@pytest.fixture()
def s3_bucket():
session = boto3.Session(profile_name=CREDS_DICT["profile"])
c = session.client("s3")
c.create_bucket(Bucket=S3_BUCKET)
yield c
r = session.resource("s3").Bucket(S3_BUCKET)
r.objects.all().delete()
r.delete()
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_snowflake_execute_single_rows(dbapi):
expected = pd.DataFrame({"field_1": [1], "field_2": [2]})
with locopy.Snowflake(dbapi=dbapi, **CREDS_DICT) as test:
test.execute("SELECT 1 AS field_1, 2 AS field_2 ")
df = test.to_dataframe()
df.columns = [c.lower() for c in df.columns]
assert np.allclose(df["field_1"], expected["field_1"])
@pytest.mark.integration
@pytest.mark.parametrize("dbapi", DBAPIS)
def test_snowflake_execute_multiple_rows(dbapi):
expected = pd.DataFrame({"field_1": [1, 2], "field_2": [1, 2]})
import os
import pandas as pd
import requests
import json
import re
import warnings
import geocoder
import getpass
google_key = os.getenv("POETRY_GOOGLE_KEY")
yelp_key = os.getenv("POETRY_YELP_KEY")
pd.options.display.max_colwidth = 300
if google_key is None:
google_key = getpass.getpass("Please input your Google API key.\n")
if yelp_key is None:
yelp_key = getpass.getpass("Please input your Yelp Fusion API key.\n")
def ParsingAddress(raw_location_list):
"""
A supporting function that parses the raw location info from Yelp Fusion API to make it more readable.
Parameters
----------
raw_location_list : pandas.core.series.Series
Required. A pd.Series of dictionaries containing address information in the JSON output from Fusion API.
Returns
-------
list
A list of more readable address strings. Each element is a string of the format "<street address>, <city>, <state> <ZIP code>", e.g. "509 Amsterdam Ave, New York, NY 10024".
"""
location_list = []
for raw_location in raw_location_list:
temp = [v for k,v in raw_location.items()]
temp_location = ', '.join(temp[len(temp)-1])
location_list = location_list + [temp_location]
return(location_list)
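# Example usage (illustrative; the dictionary mirrors Yelp's "location" schema and the
# values are hypothetical -- the join uses the last value, i.e. display_address):
# raw = pd.Series([{"address1": "509 Amsterdam Ave",
#                   "display_address": ["509 Amsterdam Ave", "New York, NY 10024"]}])
# ParsingAddress(raw)  # -> ["509 Amsterdam Ave, New York, NY 10024"]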
def SearchRestaurant(yelp_key = yelp_key,
searching_keywords = "restaurant",
location = "Union Square, New York, NY 10003",
longitude = None,
latitude = None,
distance_max = 15000,
list_len = 40,
price = "1,2,3,4"):
"""
Perform restaurant searching on Yelp.
Parameters
----------
yelp_key : str
Optional. The API key for Yelp fusion API.
searching_keywords : str
Optional. The keywords for Yelp searching. If not specified, the general term "restaurant" is searched.
location : str
Optional. A string describing the address of the location around which the search is conducted.
longitude : float
Required if location is not specified. The longitude of the current location.
latitude : float
Required if location is not specified. The latitude of the current location.
distance_max : int
Optional. A suggested search radius in meters.
list_len : int
Optional. The number of restaurants to show in the resulting dataframe.
price : str
Optional. Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.
The price filter can be a list of comma delimited pricing levels. For example, "1, 2, 3" will
filter the results to show the ones that are $, $$, or $$$.
Returns
-------
pandas.core.frame.DataFrame
A dataframe that includes essential information about the restaurants in the search result.
Examples
--------
>>> from yelpgoogletool import yelpgoogletool
>>> yelpgoogletool.SearchRestaurant(location = "Columbia University, NYC",list_len=2)
name id distance location price phone rating review_count
0 The Tang - Upper West Side TzhAlljC_843JO7UDDUIaQ 0.6 920 Amsterdam Ave, New York, NY 10025 $$ +16465967970 4.5 215
1 <NAME> H9GD7km7riFooM0FkdwOPg 0.5 2756 Broadway, New York, NY 10025 $$ +12128735025 4.0 2527
"""
# Check whether the parameters are of valid type
longlat_input_checker = (longitude == None) + (latitude == None)
assert type(searching_keywords) == str, "The parameter 'searching_keywords' should be a string!"
assert type(location) == str, "The parameter 'location' should be a string!"
assert (type(longitude) == type(None) or type(longitude) == float), "The parameter 'longitude' should be a float!"
assert (type(latitude) == type(None) or type(latitude) == float), "The parameter 'latitude' should be a float!"
assert type(distance_max) == int, "The parameter 'distance_max' should be an integer!"
assert type(list_len) == int, "The parameter 'list_len' should be an integer!"
assert (type(price) == type(None) or type(price) == str), "The parameter 'price' should be a str representing price levels, e.g. '1,2,3'!"
# Check that longitude and latitude are either both specified or both unspecified
assert longlat_input_checker != 1, "Either both or neither of 'longitude' and 'latitude' should be specified!"
# Check whether some parameters are off limit
assert distance_max <= 20000, "You do not want to travel more than 20 km for your dinner!"
assert list_len <= 500, "The length of searching result list should be no more than 500!"
# Set the parameters for API queries
url = "https://api.yelp.com/v3/businesses/search"
headers = {"Authorization":yelp_key}
querystring = {"term":searching_keywords}
if longlat_input_checker == 0:
assert (longitude >= -180) & (latitude >= -180) & (longitude <= 180) & (latitude <= 180), "Invalid 'longitude' or 'latitude'"
if location != "Union Square, New York, NY 10003":
warnings.warn("The parameter 'location' is not used when longitude and latitude are specified.")
querystring["longitude"] = longitude
querystring["latitude"] = latitude
else:
querystring["location"] = location
if type(price) == str:
querystring["price"] = price
# Set offset to be the number of records that has already been searched
offset = 0
df_restaurant_list = pd.DataFrame()
while offset < list_len:
# This is the number of records to search in this batch
limit = min(list_len - offset, 50)
querystring["limit"] = limit
querystring["offset"] = offset
# request data from Fusion API
response = requests.request("GET", url, headers = headers, params = querystring)
rspn_json = response.json()
#if rspn_json
# merge the data into df_restaurant_list
for business in rspn_json['businesses']:
df_restaurant_list = df_restaurant_list.append(pd.Series(business), ignore_index=True)
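# Hypothetical remainder of the function (the source is truncated here; only names already
# defined above are reused): advance the offset, then tidy up and return the result.
# offset += limit
# df_restaurant_list = df_restaurant_list.reset_index(drop=True)
# df_restaurant_list["location"] = ParsingAddress(df_restaurant_list["location"])
# return df_restaurant_list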
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
import pandas as pd
from IPython import display
import time
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
#Obtain data from CSVs
fundementalsData = pd.read_csv("./nyse/fundamentals.csv").dropna()
pricesData = pd.read_csv("./nyse/prices.csv")
#Convert data to dataframes
fundementalsDataFrame = pd.DataFrame(fundementalsData)
pricesDataFrame = pd.DataFrame(pricesData)
#Manipulate data in fundementals csv
#Convert date into POSIX time
fundementalsDataFrame["Period Ending"] = pd.DatetimeIndex (fundementalsData["Period Ending"]).astype (np.int64)//(10**9)
#Convert column names for merge consistancy later
fundementalsDataFrame.columns = fundementalsDataFrame.columns.str.replace("Ticker Symbol","symbol")
fundementalsDataFrame.columns = fundementalsDataFrame.columns.str.replace("Period Ending","date")
#Manipulate price data
#Convert dates to POSIX time
pricesDataFrame["date"] = pd.DatetimeIndex (pricesDataFrame["date"]).astype (np.int64)//(10**9)
#Only need 3 columns
pricesDataFrame = pricesDataFrame[["date","symbol","open"]]
#Copy prices to new dataframe to get difference over time
priceDifferenceDataFrame = pricesDataFrame.copy()
#Subtract 60 days from copied data time, to get difference in price over 60 days
priceDifferenceDataFrame["date"] = priceDifferenceDataFrame["date"] - (86400 * 60)
#Merge original and copied data and rename columns
mergeDataFrame = pd.merge(pricesDataFrame, priceDifferenceDataFrame, on=["symbol","date"])
mergeDataFrame.columns = ["date", "symbol", "initial", "final"]
#Add new column, being the difference in price over 60 days
mergeDataFrame["delta"] = mergeDataFrame["final"] - mergeDataFrame["initial"]
#Merge previous data with fundementals data
finalDataFrame = pd.merge(fundementalsDataFrame, mergeDataFrame, on=["symbol","date"])
#display.display(finalDataFrame)
finalDataFrame = pd.get_dummies(finalDataFrame)
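#Hypothetical continuation (not in the truncated source): the scikit-learn imports above
#suggest the dummified features feed a classifier, e.g. predicting whether the 60-day
#price delta is positive.
#labels = (finalDataFrame["delta"] > 0).astype(int)
#features = finalDataFrame.drop(columns=["delta", "final"])
#X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.25)
#model = RandomForestClassifier(n_estimators=100).fit(X_train, y_train)
#print(model.score(X_test, y_test))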
#!/usr/bin/env python
# coding: utf-8
# # 5m - Df unification (10 calib. fn-s)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from os.path import join
import pickle
from copy import copy
def get_data_name(file):
if "resnet110" in file:
return "resnet110"
elif "densenet40" in file:
return "densenet40"
else:
return "wide32"
def get_strs(file, is_ece = True, is_kde = False, is_bip = False):
extra = 0
pieces = file.split(".")[0].split("_tag_")
parts1 = pieces[0].split("_")
parts2 = pieces[1].split("_")
n_data = -1
seed = -1
# binning_CV_seed_0_10000_VecS_wide32_s7_tag_confidencegt1_dp_-1.pkl
if is_ece:
cal_method = "_".join(parts1[5:6])
data_name = get_data_name("_".join(parts1[6:]))
tag_name = parts2[0][:-3]
cgt_nr = int(parts2[0][-1])
# KDE_seed_9_10000_VecS_resnet_wide32_s7_tag_1vsRest5_with_c_dp_-1.pkl
elif is_kde:
cal_method = "_".join(parts1[4:5])
data_name = get_data_name("_".join(parts1[5:]))
tag_name = parts2[0]
cgt_nr = -1
# df_seed_1_platt_resnet_3000_cv_0_wide32_s7_tag_confidence_with_cgt3_dp_-1_iso_beta_platt.pkl
# df_seed_6_TempS_3000_cv_0_resnet_wide32_s7_1vsRest5_m_3_921420382311321_with_cgt0_dp_-1_iso_beta_platt
elif is_bip:
cal_method = "_".join(parts1[3:4])
data_name = get_data_name("_".join(parts1[4:]))
n_data = int(parts1[4])
tag_name = parts2[0][:-3]
cgt_nr = int(parts2[0][-1])
seed = int(parts1[2])
# 'df_seed_0_beta_10000_cv_0_densenet40_s7_tag_1vsRest1gt0_dp_-1_iso_beta_platt.pkl'
#df_seed_0_beta_10000_cv_0_resnet110_s7_tag_confidencegt3_dp_-1_iso_beta_platt.pkl
# df_seed_2_Isotonic_resnet110_10000_cv_0_s7_tag_confidence_with_c_dp_-1_PW_NN4_sweep.pkl
else:
cal_method = "_".join(parts1[3:4])
data_name = get_data_name("_".join(parts1[4:]))
tag_name = parts2[0]
cgt_nr = -1
return (cal_method, data_name, tag_name, cgt_nr, n_data, seed)
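# Worked example (file name taken from the sample comments above):
# get_strs("binning_CV_seed_0_10000_VecS_wide32_s7_tag_confidencegt1_dp_-1.pkl")
# -> ("VecS", "wide32", "confidence", 1, -1, -1)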
# In[7]:
def get_cgts(df):
all_cdc = []
all_cdcs = []
all_pdc = []
all_pdcs = []
for cdc, cdcs, pdc, pdcs in zip(df.c_hat_distance_c, df.c_hat_distance_c_square, df.p_distance_c, df.p_distance_c_square):
if len(np.array(cdc)) != 4:
print(cdc)
all_cdc.append(np.array(cdc))
all_cdcs.append(np.array(cdcs))
all_pdc.append(np.array(pdc))
all_pdcs.append(np.array(pdcs))
all_cdc = np.array(all_cdc)
all_cdcs = np.array(all_cdcs)
all_pdc = np.array(all_pdc)
all_pdcs = np.array(all_pdcs)
dfs = []
for i in range(4):
if len(all_cdc.shape) == 1:
print()
df_new = df.copy()
df_new.c_hat_distance_c = all_cdc[:,i]
df_new.c_hat_distance_c_square = all_cdcs[:,i]
df_new.p_distance_c = all_pdc[:,i]
df_new.p_distance_c_square = all_pdcs[:,i]
df_new.cgt_nr = i
dfs.append(df_new)
return pd.concat(dfs)
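# Note on get_cgts (added for clarity): each input row stores length-4 arrays in the
# *_distance_c columns; the function expands such a row into four rows, one per ground
# truth variant, setting cgt_nr to 0..3 and taking the scalar distances from the arrays.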
def prep_ECE(files_ECE, columns, path, id_tag):
dfs = []
for file in files_ECE:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, _, _ = get_strs(file)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
dfs.append(df)
df_ECE = pd.concat(dfs)
# Binning column = full method name
df_ECE["binning"] = df_ECE["binning"] + "_" + df_ECE["n_bins"].map(str) + "_" + df_ECE["n_folds"].map(str)
# Remove CV marker from no CV rows
df_ECE["binning"] = df_ECE['binning'].str.replace('(_0$)', "")
# ECE drop useless columns
df_ECE = df_ECE.drop(labels=['n_folds'], axis=1)
# ECE rename columns to match PW
df_ECE = df_ECE.rename({"ECE_abs":"c_hat_distance_p", "ECE_abs_debiased": "c_hat_distance_p_debiased",
"ECE_square":"c_hat_distance_p_square", "ECE_square_debiased":"c_hat_distance_p_square_debiased",
"true_calibration_error_abs":"p_distance_c", "true_calibration_error_square":"p_distance_c_square",
"slope_abs_c_hat_dist_c": "c_hat_distance_c", "slope_square_c_hat_dist_c": "c_hat_distance_c_square"}, axis=1)
df_ECE = df_ECE[columns]
df_ECE.to_pickle("res_ECE_%s.pkl" % id_tag, protocol=4)
def prep_PW(files_PW, columns, path, id_tag):
dfs = []
for file in files_PW:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, _, _ = get_strs(file, is_ece = False)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
dfs.append(df)
df_PW = pd.concat(dfs)
#df_PW.to_pickle("res_PW_%s_test.pkl" % id_tag, protocol=4)
# binnings = df_PW.binning.unique()
# binning_with_trick = []
# for binning in binnings:
# if "trick" in binning:
# binning_with_trick.append(binning)
# for bwt in binning_with_trick:
# df_PW = df_PW.loc[df_PW.binning != bwt] # Drop trick
print(df_PW.binning.unique())
# Create dummy columns for our method
df_PW["c_hat_distance_p_debiased"] = df_PW["c_hat_distance_p"]
df_PW["c_hat_distance_p_square_debiased"] = df_PW["c_hat_distance_p_square"]
# Unify calibration_function name column to match ECE_df
df_PW["calibration_function"] = df_PW['calibration_function'].str.replace('(_[0-9].[0-9]+$)', "")
df_PW = get_cgts(df_PW)
df_PW = df_PW[columns]
df_PW.to_pickle("res_PW_%s.pkl" % id_tag, protocol=4)
def prep_BIP(files_BIP, columns, path, id_tag):
dfs = []
for file in files_BIP:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, n_data, seed = get_strs(file, is_ece = False, is_bip = True)
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
df["n_data"] = n_data
df["seed"] = seed
df["p_distance_c"] = -1
df["p_distance_c_squared"] = -1
dfs.append(df)
df_BIP = pd.concat(dfs)
df_BIP = df_BIP.sort_values(by=["binning", "n_data", "calibration_function", "model_name", "tag_name", "cgt_nr", "seed"])
with open("res_PW_%s.pkl" % id_tag, "rb") as f:
res_PW = pickle.load(f)
bins_uniq = res_PW.binning.unique()
print(bins_uniq)
sel = res_PW.loc[res_PW.binning == bins_uniq[0]].sort_values(by=["binning", "n_data", "calibration_function", "model_name", "tag_name", "cgt_nr", "seed"])
p_dists = sel.loc[:, ["p_distance_c", "p_distance_c_square"]].values
p_dists_x3 = np.concatenate([p_dists, p_dists, p_dists])
df_BIP["p_distance_c"] = p_dists_x3[:, 0]
df_BIP["p_distance_c_square"] = p_dists_x3[:, 1]
# df_BIP preprocessing
# Create dummy columns for our method
df_BIP["c_hat_distance_p_debiased"] = df_BIP["c_hat_distance_p"]
df_BIP["c_hat_distance_p_square_debiased"] = df_BIP["c_hat_distance_p_square"]
# Unify calibration_function name column to match ECE_df
df_BIP["calibration_function"] = df_BIP['calibration_function'].str.replace('(_[0-9].[0-9]+$)', "")
df_BIP = df_BIP[columns]
df_BIP.to_pickle("res_BIP_%s.pkl" % id_tag, protocol=4)
def prep_KDE(files_KDE, columns, path, id_tag):
dfs = []
for file in files_KDE:
#print(file)
cal_fn, data_name, tag_name, cgt_nr, _, _ = get_strs(file, is_ece = False, is_kde = True) #cal_method, data_name, tag_name, cgt_nr, n_data, seed
with open(join(path, file), "rb") as f:
df = pickle.load(f)
df["calibration_function"] = cal_fn
df["model_name"] = data_name
df["tag_name"] = tag_name
df["cgt_nr"] = cgt_nr
dfs.append(df)
df_KDE = pd.concat(dfs)
for i, row in df_KDE.iterrows():
if isinstance(row.c_hat_distance_p, tuple):
row.c_hat_distance_p = row.c_hat_distance_p[0]
vals = np.array(df_KDE.loc[df_KDE.binning == "KDE_integral", "c_hat_distance_p"].values)
vals = [i[0] for i in vals]
df_KDE.loc[df_KDE.binning == "KDE_integral", "c_hat_distance_p"] = vals
df_KDE = get_cgts(df_KDE)
# Create dummy columns for our method
df_KDE["c_hat_distance_p_debiased"] = df_KDE["c_hat_distance_p"]
df_KDE["c_hat_distance_p_square_debiased"] = df_KDE["c_hat_distance_p_square"]
# Unify calibration_function name column to match ECE_df
df_KDE["calibration_function"] = df_KDE['calibration_function'].str.replace('(_[0-9].[0-9]+$)', "")
df_KDE = df_KDE[columns]
df_KDE.to_pickle("res_KDE_%s.pkl" % id_tag, protocol=4)
# MAIN
IDENT_TAG = "28_05_pre"
PATH_res = "data_1m_final_2805"
COLUMNS = ["model_name", "tag_name", "cgt_nr", "seed", "n_data", "binning", "n_bins", "c_hat_distance_p", "c_hat_distance_p_square", "c_hat_distance_p_debiased",
"c_hat_distance_p_square_debiased", "c_hat_distance_c", "c_hat_distance_c_square", "p_distance_c", "p_distance_c_square", "calibration_function"]
files_ECE = []
files_PW = []
files_KDE = []
files_BIP = [] # beta, iso, platt,
for file in os.listdir(PATH_res):
if file.endswith(".pkl") and not "_m_" in file:
if file.startswith("binning"):
files_ECE.append(file)
elif file.startswith("df_seed"):
if ("gt0_" in file) or ("gt1_" in file) or ("gt2_" in file) or ("gt3_" in file):
files_BIP.append(file)
else:
files_PW.append(file)
elif file.startswith("KDE"):
files_KDE.append(file)
print("ECE files:", len(files_ECE)) # cgt - 612*4, 44 missing? # TODO why? - 44 puudu
print("KDE files:", len(files_KDE)) # Right amount
print("PW files:", len(files_PW)) # PW_NN_mono + PW_NN_SWEEP # Mis siin puudu? 612*10 = 6120` rIGHT AMount
print("BIP files:", len(files_BIP)) # Right amount
print("Start prepping")
#files_ECE = []
if len(files_ECE) != 0:
prep_ECE(files_ECE, COLUMNS, PATH_res, IDENT_TAG)
print("ECE prepped")
if len(files_PW) != 0:
prep_PW(files_PW, COLUMNS, PATH_res, IDENT_TAG)
print("PW prepped")
if len(files_BIP) != 0:
prep_BIP(files_BIP, COLUMNS, PATH_res, IDENT_TAG)
print("BIP prepped")
if len(files_KDE) != 0:
prep_KDE(files_KDE, COLUMNS, PATH_res, IDENT_TAG)
print("KDE prepped")
# ### Put all together
res_dfs = []
if len(files_KDE) != 0:
with open("res_KDE_%s.pkl" % IDENT_TAG, "rb") as f:
res_KDE = pd.read_pickle(f)
# -*- coding:utf-8 -*-
"""
Macroeconomic data class
Created on 2019/01/09
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
import pandas as pd
import numpy as np
import re
import json
import time
from gugu.utility import Utility
from gugu.base import Base, cf
import sys
class Macro(Base):
def gdpYear(self, retry=3, pause=0.001):
"""
Get annual gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'year':, 'gdp':, ...}, ...]
year : statistical year
gdp : gross domestic product (CNY 100 million)
pc_gdp : GDP per capita (CNY)
gnp : gross national product (CNY 100 million)
pi : primary industry (CNY 100 million)
si : secondary industry (CNY 100 million)
industry : of which, industry (CNY 100 million)
cons_industry : construction (CNY 100 million)
ti : tertiary industry (CNY 100 million)
trans_industry : transport, storage, post and telecommunications (CNY 100 million)
lbdy : wholesale and retail trade and catering (CNY 100 million)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK4224641560861/MacPage_Service.get_pagedata?cate=nation&event=0&from=0&num=70&condition=&_=4224641560861
datastr = self.__parsePage('nation', 0, 70, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_YEAR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
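# Example usage (illustrative only; the import path is assumed and the call needs network
# access to money.finance.sina.com.cn):
# from gugu.macro import Macro
# df = Macro().gdpYear()
# print(df[['year', 'gdp', 'pc_gdp']].head())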
def gdpQuarter(self, retry=3, pause=0.001):
"""
Get quarterly gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'quarter':, 'gdp':, ...}, ...]
quarter : quarter
gdp : gross domestic product (CNY 100 million)
gdp_yoy : GDP year-on-year growth (%)
pi : value added of the primary industry (CNY 100 million)
pi_yoy : value added of the primary industry, year-on-year growth (%)
si : value added of the secondary industry (CNY 100 million)
si_yoy : value added of the secondary industry, year-on-year growth (%)
ti : value added of the tertiary industry (CNY 100 million)
ti_yoy : value added of the tertiary industry, year-on-year growth (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3935140379887/MacPage_Service.get_pagedata?cate=nation&event=1&from=0&num=250&condition=&_=3935140379887
datastr = self.__parsePage('nation', 1, 250, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_QUARTER_COLS)
self._data['quarter'] = self._data['quarter'].astype(object)
self._data[self._data==0] = np.NaN
return self._result()
def demandsToGdp(self, retry=3, pause=0.001):
"""
获取三大需求对GDP贡献数据
Parameters
--------
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0.001
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
DataFrame or List: [{'year':, 'cons_to':, ...}, ...]
year :统计年度
cons_to :最终消费支出贡献率(%)
cons_rate :最终消费支出拉动(百分点)
asset_to :资本形成总额贡献率(%)
asset_rate:资本形成总额拉动(百分点)
goods_to :货物和服务净出口贡献率(%)
goods_rate :货物和服务净出口拉动(百分点)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3153587567694/MacPage_Service.get_pagedata?cate=nation&event=4&from=0&num=80&condition=&_=3153587567694
datastr = self.__parsePage('nation', 4, 80, retry, pause)
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
self._data = pd.DataFrame(js,columns=cf.GDP_FOR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def idsPullToGdp(self, retry=3, pause=0.001):
"""
获取三大产业对GDP拉动数据
Parameters
--------
retry : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0.001
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
Return
--------
DataFrame or List: [{'year':, 'gdp_yoy':, ...}, ...]
year :统计年度
gdp_yoy :国内生产总值同比增长(%)
pi :第一产业拉动率(%)
si :第二产业拉动率(%)
industry:其中工业拉动(%)
ti :第三产业拉动率(%)
"""
self._data = pd.DataFrame()
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename, 'rt') as csvfile:
# pd.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile)  # comma is the default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:, 'Scientific Name']
self.com_name = dr.loc[:, 'Common Name']
self.taxa = dr.loc[:, 'Taxa']
self.order = dr.loc[:, 'Order']
self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:, 'BW (g)']
self.diet_item = dr.loc[:, 'Food item']
self.h2o_cont = dr.loc[:, 'Water content of diet']
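# Illustrative usage sketch (not from the original source), assuming the bundled
# tests/TEDSpeciesProperties.csv is available:
#     props = TedSpeciesProperties()
#     props.read_species_properties()
#     print(props.sci_name.head())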
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = pd.Series([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = pd.Series([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = pd.Series([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = pd.Series([], dtype="float", name="arbt_mamm_behav")
self.arbt_mamm_sensory = pd.Series([], dtype="float", name="arbt_mamm_sensory")
# application rate-based toxicity (arbt) : birds (lbs active ingredient/Acre)
self.arbt_bird_mort = pd.Series([], dtype="float", name="arbt_bird_mort")
self.arbt_bird_growth = pd.Series([], dtype="float", name="arbt_bird_growth")
self.arbt_bird_repro = pd.Series([], dtype="float", name="arbt_bird_repro")
self.arbt_bird_behav = pd.Series([], dtype="float", name="arbt_bird_behav")
self.arbt_bird_sensory = pd.Series([], dtype="float", name="arbt_bird_sensory")
# application rate-based toxicity (arbt) : reptiles (lbs active ingredient/Acre)
self.arbt_reptile_mort = pd.Series([], dtype="float", name="arbt_reptile_mort")
self.arbt_reptile_growth = pd.Series([], dtype="float", name="arbt_reptile_growth")
self.arbt_reptile_repro = pd.Series([], dtype="float", name="arbt_reptile_repro")
self.arbt_reptile_behav = pd.Series([], dtype="float", name="arbt_reptile_behav")
self.arbt_reptile_sensory = pd.Series([], dtype="float", name="arbt_reptile_sensory")
# application rate-based toxicity (arbt) : invertebrates (lbs active ingredient/Acre)
self.arbt_inv_1inmill_mort = pd.Series([], dtype="float", name="arbt_inv_1inmill_mort")
self.arbt_inv_1inten_mort = pd.Series([], dtype="float", name="arbt_inv_1inten_mort")
self.arbt_inv_sub_direct = pd.Series([], dtype="float", name="arbt_inv_sub_direct")
self.arbt_inv_sub_indirect = pd.Series([], dtype="float", name="arbt_inv_sub_indirect")
self.arbt_inv_growth = pd.Series([], dtype="float", name="arbt_inv_growth")
self.arbt_inv_repro = pd.Series([], dtype="float", name="arbt_inv_repro")
self.arbt_inv_behav = pd.Series([], dtype="float", name="arbt_inv_behav")
self.arbt_inv_sensory = pd.Series([], dtype="float", name="arbt_inv_sensory")
# plant toxicity (pt) : monocots (lbs active ingredient/Acre)
self.pt_mono_pre_noec = pd.Series([], dtype="float", name="pt_mono_pre_noec")
self.pt_mono_pre_loec = pd.Series([], dtype="float", name="pt_mono_pre_loec")
self.pt_mono_pre_ec25 = pd.Series([], dtype="float", name="pt_mono_pre_ec25")
self.pt_mono_post_noec = pd.Series([], dtype="float", name="pt_mono_post_noec")
self.pt_mono_post_loec = pd.Series([], dtype="float", name="pt_mono_post_loec")
self.pt_mono_post_ec25 = pd.Series([], dtype="float", name="pt_mono_post_ec25")
self.pt_mono_dir_mort = pd.Series([], dtype="float", name="pt_mono_dir_mort")
self.pt_mono_indir_mort = pd.Series([], dtype="float", name="pt_mono_indir_mort")
self.pt_mono_dir_repro = pd.Series([], dtype="float", name="pt_mono_dir_repro")
self.pt_mono_indir_repro = pd.Series([], dtype="float", name="pt_mono_indir_repro")
# plant toxicity (pt) : dicots (lbs active ingredient/Acre)
self.pt_dicot_pre_noec = pd.Series([], dtype="float", name="pt_dicot_pre_noec")
self.pt_dicot_pre_loec = pd.Series([], dtype="float", name="pt_dicot_pre_loec")
self.pt_dicot_pre_ec25 = pd.Series([], dtype="float", name="pt_dicot_pre_ec25")
# Let's start off by loading in Jeff's CDR3's
import numpy as np
import pandas
def getBunker():
total_Abs=pandas.read_csv('app_data/mouse_IgA.dat',sep='\s+',header=None,names=['cdrL1_aa','cdrL2_aa','cdrL3_aa','cdrH1_aa','cdrH2_aa','cdrH3_aa','react'])
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
mono_all=total_abs7[total_abs7['react'].isin([0.0,1.0])].values
poly_all=total_abs7[total_abs7['react'].isin([2.0,3.0,4.0,5.0,6.0,7.0])].values
mono=total_abs7[total_abs7['react'].isin([0.0])].values
poly=total_abs7[total_abs7['react'].isin([5.0,6.0,7.0])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
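# The row-filtering loops above build `del_these` index lists just to drop any antibody whose six
# CDR columns are not all populated. A vectorized equivalent is sketched below (an assumption-based
# helper, not wired into the loaders; it only presumes the first six columns hold the CDR strings):
def drop_rows_with_empty_cdrs(arr):
    """Keep only rows whose first six (CDR) columns are all non-empty strings."""
    keep = np.all(arr[:, 0:6] != '', axis=1)
    return arr[keep]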
#####################################################################################
def getJenna():
total_Abs=pandas.read_csv('app_data/flu_IgG.dat',sep='\s+',header=None,
names=['cdrL1_aa','cdrL2_aa','cdrL3_aa','cdrH1_aa','cdrH2_aa','cdrH3_aa','react'])
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
# Having this and the above lines as "if" options could make this loader more generalizable...
mono_all=total_abs7[total_abs7['react'].isin([0,1])].values
poly_all=total_abs7[total_abs7['react'].isin([2,3,4,5,6,7])].values
mono=total_abs7[total_abs7['react'].isin([0])].values
poly=total_abs7[total_abs7['react'].isin([5,6,7])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
def getHugo():
my_heavy=pandas.read_csv('app_data/hiv_igg_data/gut_heavy_aa.dat',sep='\s+')
my_light=pandas.read_csv('app_data/hiv_igg_data/gut_light_aa.dat',sep='\s+')
poly_YN=pandas.read_csv('app_data/hiv_igg_data/gut_num_react.dat',sep='\s+',header=None,names=['react'])
total_abs=pandas.concat([my_light,my_heavy,poly_YN],axis=1)
total_abs7 = total_abs.where((pandas.notnull(total_abs)), '')
mono_all=total_abs7[total_abs7['react'].isin([0,1])].values
poly_all=total_abs7[total_abs7['react'].isin([2,3,4])].values
mono=total_abs7[total_abs7['react'].isin([0])].values
poly=total_abs7[total_abs7['react'].isin([3,4])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
def getHugo_Nature():
my_heavy=pandas.read_csv('app_data/hiv_igg_data/nat_heavy_aa.dat',sep='\s+')
my_light=pandas.read_csv('app_data/hiv_igg_data/nat_light_aa.dat',sep='\s+')
poly_YN=pandas.read_csv('app_data/hiv_igg_data/nat_num_react.dat',sep='\s+',header=None,names=['react'])
total_Abs=pandas.concat([my_light,my_heavy,poly_YN],axis=1)
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
# And finish it up...
mono_all=total_abs7[total_abs7['react'].isin([0.0,1.0])].values
poly_all=total_abs7[total_abs7['react'].isin([2.0,3.0,4.0,5.0,6.0])].values
mono=total_abs7[total_abs7['react'].isin([0.0])].values
poly=total_abs7[total_abs7['react'].isin([5.0,6.0])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
def getHugo_NatCNTRL():
my_heavy=pandas.read_csv('app_data/hiv_igg_data/nat_cntrl_heavy_aa.dat',sep='\s+')
my_light=pandas.read_csv('app_data/hiv_igg_data/nat_cntrl_light_aa.dat',sep='\s+')
poly_YN=pandas.read_csv('app_data/hiv_igg_data/nat_cntrl_num_react.dat',sep='\s+',header=None,names=['react'])
total_Abs=pandas.concat([my_light,my_heavy,poly_YN],axis=1)
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
# And finish it up...
mono_all=total_abs7[total_abs7['react'].isin([0.0,1.0])].values
poly_all=total_abs7[total_abs7['react'].isin([2.0,3.0,4.0,5.0,6.0])].values
mono=total_abs7[total_abs7['react'].isin([0.0])].values
poly=total_abs7[total_abs7['react'].isin([5.0,6.0])].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(mono[:,5])):
if mono[i,5] == '' or mono[i,4] == '' or mono[i,3] == '' or mono[i,2] == '' or mono[i,1] == '' or mono[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono2=np.delete(mono,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly[:,5])):
if poly[i,5] == '' or poly[i,4] == '' or poly[i,3] == '' or poly[i,2] == '' or poly[i,1] == '' or poly[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly2=np.delete(poly,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]),np.transpose(mono2[:,0:6]),np.transpose(poly2[:,0:6]))
def getHugo_PLOS():
my_heavy=pandas.read_csv('app_data/hiv_igg_data/plos_heavy_aa.dat',sep='\s+')
my_light=pandas.read_csv('app_data/hiv_igg_data/plos_light_aa.dat',sep='\s+')
poly_YN=pandas.read_csv('app_data/hiv_igg_data/plos_yn.dat',sep='\s+',header=None,names=['YN'])
total_Abs=pandas.concat([my_light,my_heavy,poly_YN],axis=1)
total_abs1 = total_Abs.where((pandas.notnull(total_Abs)), '')
# Remove X's in sequences... Should actually get a count of these at some point...
total_abs2=total_abs1[~total_abs1['cdrL1_aa'].str.contains("X")]
total_abs3=total_abs2[~total_abs2['cdrL2_aa'].str.contains("X")]
total_abs4=total_abs3[~total_abs3['cdrL3_aa'].str.contains("X")]
total_abs5=total_abs4[~total_abs4['cdrH1_aa'].str.contains("X")]
total_abs6=total_abs5[~total_abs5['cdrH2_aa'].str.contains("X")]
total_abs7=total_abs6[~total_abs6['cdrH3_aa'].str.contains("X")]
# And finish it up...
mono_all=total_abs7[total_abs7['YN']=='N'].values
poly_all=total_abs7[total_abs7['YN']=='Y'].values
a=0
del_these=[]
for i in np.arange(len(mono_all[:,5])):
if mono_all[i,5] == '' or mono_all[i,4] == '' or mono_all[i,3] == '' or mono_all[i,2] == '' or mono_all[i,1] == '' or mono_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
mono_all2=np.delete(mono_all,del_these,axis=0)
a=0
del_these=[]
for i in np.arange(len(poly_all[:,5])):
if poly_all[i,5] == '' or poly_all[i,4] == '' or poly_all[i,3] == '' or poly_all[i,2] == '' or poly_all[i,1] == '' or poly_all[i,0] == '':
if a == 0:
del_these=i
else:
del_these=np.vstack((del_these,i))
a=a+1
poly_all2=np.delete(poly_all,del_these,axis=0)
return(np.transpose(mono_all2[:,0:6]),np.transpose(poly_all2[:,0:6]))
def getAdimab():
heavy_Abs=pandas.read_csv('app_data/adimab_data/cdrs_H_final.txt',sep='\s+',header=None,names=['cdrH1_aa','cdrH2_aa','cdrH3_aa'])
light_Abs=pandas.read_csv('app_data/adimab_data/cdrs_L_final.txt',sep='\s+',header=None,names=['cdrL1_aa','cdrL2_aa','cdrL3_aa'])
outcomes=pandas.read_csv('app_data/adimab_data/drug_outcomes.csv',sep=',',header=0)
assays=pandas.read_csv('app_data/adimab_data/drug_properties.csv',sep=',',header=0)
names=outcomes['Name']
clinical=outcomes['Clinical Status']
phage=outcomes['Phagec']
elisa_polyScores=assays['ELISA']
psr_assayScore=assays['Poly-Specificity Reagent (PSR) SMP Score (0-1)']
total_Abs=pandas.concat([names,heavy_Abs,clinical,light_Abs,phage,assays.loc[:, assays.columns != 'Unnamed: 13']],axis=1).dropna()
# Let's not process this data, just return the matrix
return(total_Abs)
#####################################################################################
def getSabDab():
heavy_Abs=pandas.read_csv('app_data/SabDab_data/nonAdimab_igblast/cdrs_H_final.txt',sep='\s+',header=None,names=['cdrH1_aa','cdrH2_aa','cdrH3_aa'])
light_Abs=pandas.read_csv('app_data/SabDab_data/nonAdimab_igblast/cdrs_L_final.txt',sep='\s+',header=None,names=['cdrL1_aa','cdrL2_aa','cdrL3_aa'])
notAdi=pandas.read_csv('app_data/SabDab_data/non_adimab_dataHu.csv',sep=',',header=0)
Adi=pandas.read_csv('app_data/SabDab_data/adimab_SabDabdata.csv',sep=',',header=0)
adiName=Adi['Therapeutic']; NotadiName=notAdi['Therapeutic']
adiOutcome=Adi["Highest_Clin_Trial (Jan '20)"]
NotadiOutcome=notAdi["Highest_Clin_Trial (Jan '20)"]
adiDeact=Adi['Est. Status']; NotadiDeact=notAdi['Est. Status']
adimab_info=pandas.concat([adiName,adiOutcome,adiDeact],axis=1)
#!/usr/bin/env python
"""
This script enables training and comparison of models on multiple GPUs.
Usage:
```
python scripts/automate_training.py -c path/to/config.json -ch path/to/config_hyper.json \
-n number_of_iterations --all-combin
```
"""
import argparse
import copy
import itertools
from functools import partial
import json
import random
import collections.abc
import shutil
import sys
import joblib
import pandas as pd
import numpy as np
import torch.multiprocessing as mp
from ivadomed.loader.bids_dataframe import BidsDataframe
import ivadomed.scripts.visualize_and_compare_testing_models as violin_plots
from pathlib import Path
from loguru import logger
from ivadomed import main as ivado
from ivadomed import config_manager as imed_config_manager
from ivadomed.loader import utils as imed_loader_utils
from ivadomed.scripts.compare_models import compute_statistics
from ivadomed import utils as imed_utils
from ivadomed.keywords import ConfigKW,SplitDatasetKW, LoaderParamsKW
LOG_FILENAME = 'log.txt'
logger.add(LOG_FILENAME)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", required=True, help="Base config file path.",
metavar=imed_utils.Metavar.file)
parser.add_argument("-ch", "--config-hyper", dest="config_hyper", required=True,
help="JSON file where hyperparameters to experiment are listed.",
metavar=imed_utils.Metavar.file)
parser.add_argument("-pd", "--path-data", required=False, help="Path to BIDS data.",
metavar=imed_utils.Metavar.int)
parser.add_argument("-n", "--n-iterations", dest="n_iterations", default=1,
type=int, help="Number of times to run each config.",
metavar=imed_utils.Metavar.int)
parser.add_argument("--all-combin", dest='all_combin', action='store_true',
help="To run all combinations of config"),
parser.add_argument("-m", "--multi-params", dest="multi_params", action='store_true',
help="To change multiple parameters at once.")
parser.add_argument("--run-test", dest='run_test', action='store_true',
help="Evaluate the trained model on the testing sub-set.")
parser.add_argument("--fixed-split", dest='fixed_split', action='store_true',
help="Keep a constant dataset split for all configs and iterations")
parser.add_argument("-l", "--all-logs", dest="all_logs", action='store_true',
help="Keep all log directories for each iteration.")
parser.add_argument('-t', '--thr-increment', dest="thr_increment", required=False, type=float,
help="""A threshold analysis is performed at the end of the training using
the trained model and the validation sub-dataset to find the optimal
binarization threshold. The specified value indicates the increment
between 0 and 1 used during the analysis (e.g. 0.1).""",
metavar=imed_utils.Metavar.float)
parser.add_argument("-o", "--output_dir", required=False,
help="Output Folder.")
return parser
def train_worker(config, thr_incr):
"""
Args:
config (dict): dictionary containing configuration details.
thr_incr (float): A threshold analysis is performed at the end of the training
using the trained model and the validation sub-dataset to find the optimal binarization
threshold. The specified value indicates the increment between 0 and 1 used during the
ROC analysis (e.g. 0.1). Flag: ``-t``, ``--thr-increment``
"""
current = mp.current_process()
# ID of process used to assign a GPU
ID = int(current.name[-1]) - 1
# Use GPU i from the array specified in the config file
config[ConfigKW.GPU_IDS] = [config[ConfigKW.GPU_IDS][ID]]
# Call ivado cmd_train
try:
# Save best validation score
config[ConfigKW.COMMAND] = "train"
best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = \
ivado.run_command(config, thr_increment=thr_incr)
except Exception:
logger.exception('Got exception on main handler')
logger.info("Unexpected error:", sys.exc_info()[0])
raise
# Save config file in output path
config_copy = open(config[ConfigKW.PATH_OUTPUT] + "/config_file.json", "w")
json.dump(config, config_copy, indent=4)
return config[ConfigKW.PATH_OUTPUT], best_training_dice, best_training_loss, best_validation_dice, \
best_validation_loss
def test_worker(config):
# Call ivado cmd_eval
current = mp.current_process()
# ID of process used to assign a GPU
ID = int(current.name[-1]) - 1
# Use GPU i from the array specified in the config file
config[ConfigKW.GPU_IDS] = [config[ConfigKW.GPU_IDS][ID]]
try:
# Save best test score
config[ConfigKW.COMMAND] = "test"
df_results, test_dice = ivado.run_command(config)
except Exception:
logger.exception('Got exception on main handler')
logger.info("Unexpected error:", sys.exc_info()[0])
raise
return config[ConfigKW.PATH_OUTPUT], test_dice, df_results
def split_dataset(initial_config):
"""
Args:
initial_config (dict): The original config file, which we use as a basis from which
to modify our hyperparameters.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
"""
loader_parameters = initial_config[ConfigKW.LOADER_PARAMETERS]
path_output = Path(initial_config[ConfigKW.PATH_OUTPUT])
if not path_output.is_dir():
logger.info(f'Creating output path: {path_output}')
path_output.mkdir(parents=True)
else:
logger.info(f'Output path already exists: {path_output}')
bids_df = BidsDataframe(loader_parameters, str(path_output), derivatives=True)
train_lst, valid_lst, test_lst = imed_loader_utils.get_new_subject_file_split(
df=bids_df.df,
data_testing=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.DATA_TESTING],
split_method=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.SPLIT_METHOD],
random_seed=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.RANDOM_SEED],
train_frac=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.TRAIN_FRACTION],
test_frac=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.TEST_FRACTION],
path_output="./",
balance=initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.BALANCE] \
if SplitDatasetKW.BALANCE in initial_config[ConfigKW.SPLIT_DATASET] else None
)
# save the subject distribution
split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst}
split_path = "./" + "common_split_datasets.joblib"
joblib.dump(split_dct, split_path)
initial_config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.FNAME_SPLIT] = split_path
return initial_config
def make_config_list(param_list, initial_config, all_combin, multi_params):
"""Create a list of config dictionaries corresponding to different hyperparameters.
Args:
param_list (list)(HyperparameterOption): A list of the different hyperparameter options.
initial_config (dict): The original config file, which we use as a basis from which
to modify our hyperparameters.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
all_combin (bool): If true, combine the hyperparameters combinatorically.
multi_params (bool): If true, combine the hyperparameters by index in the list, i.e.
all the first elements, then all the second elements, etc.
Returns:
list, dict: A list of configuration dictionaries, modified by the hyperparameters.
.. code-block:: python
config_list = [
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'DiceLoss'}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "FocalLoss", "gamma": 0.2, "alpha": 0.5}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'FocalLoss', 'gamma': 0.2, 'alpha': 0.5}"
},
# etc...
]
"""
config_list = []
if all_combin:
keys = set([hyper_option.base_key for hyper_option in param_list])
for combination in list(itertools.combinations(param_list, len(keys))):
if keys_are_unique(combination):
new_config = copy.deepcopy(initial_config)
path_output = new_config[ConfigKW.PATH_OUTPUT]
for hyper_option in combination:
new_config = update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
path_output = path_output + folder_name_suffix
new_config[ConfigKW.PATH_OUTPUT] = path_output
config_list.append(new_config)
elif multi_params:
base_keys = get_base_keys(param_list)
base_key_dict = {key: [] for key in base_keys}
for hyper_option in param_list:
base_key_dict[hyper_option.base_key].append(hyper_option)
max_length = np.min([len(base_key_dict[base_key]) for base_key in base_key_dict.keys()])
for i in range(0, max_length):
new_config = copy.deepcopy(initial_config)
path_output = new_config[ConfigKW.PATH_OUTPUT]
for key in base_key_dict.keys():
hyper_option = base_key_dict[key][i]
new_config = update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
path_output = path_output + folder_name_suffix
new_config[ConfigKW.PATH_OUTPUT] = path_output
config_list.append(new_config)
else:
for hyper_option in param_list:
new_config = copy.deepcopy(initial_config)
update_dict(new_config, hyper_option.option, hyper_option.base_key)
folder_name_suffix = hyper_option.name
folder_name_suffix = folder_name_suffix.translate({ord(i): None for i in '[]}{ \''})
folder_name_suffix = folder_name_suffix.translate({ord(i): '-' for i in ':=,'})
new_config[ConfigKW.PATH_OUTPUT] = initial_config[ConfigKW.PATH_OUTPUT] + folder_name_suffix
config_list.append(new_config)
return config_list
class HyperparameterOption:
"""Hyperparameter option to edit config dictionary.
This class is used to edit a standard config file. For example, say we want to edit the
following config file:
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
Say we want to change the ``loss``. We could have:
.. code-block::
base_key = "loss"
base_option = {"name": "FocalLoss", "gamma": 0.5}
option = {"training_parameters": {"loss": {"name": "FocalLoss", "gamma": 0.5}}}
Attributes:
base_key (str): the key whose value you want to edit.
option (dict): the full tree path to the value you want to insert.
base_option (dict): the value you want to insert.
name (str): the name to be used for the output folder.
"""
def __init__(self, base_key=None, option=None, base_option=None):
self.base_key = base_key
self.option = option
self.base_option = base_option
self.name = None
self.create_name_str()
def __eq__(self, other):
return self.base_key == other.base_key and self.option == other.option
def create_name_str(self):
self.name = "-" + str(self.base_key) + "=" + str(self.base_option).replace("/", "_")
def get_param_list(my_dict, param_list, superkeys):
"""Recursively create the list of hyperparameter options.
Args:
my_dict (dict): A dictionary of parameters.
param_list (list)(HyperparameterOption): A list of HyperparameterOption objects.
superkeys (list)(str): Parent keys accumulated during the recursion, used to rebuild the
nested dictionary path for each option; pass an empty list at the top-level call.
Returns:
list, HyperparameterOption: A list of HyperparameterOption objects.
"""
for key, value in my_dict.items():
if type(value) is list:
for element in value:
dict_prev = {key: element}
for superkey in reversed(superkeys):
dict_new = {}
dict_new[superkey] = dict_prev
if len(superkeys) == 0:
dict_new = dict_prev
hyper_option = HyperparameterOption(base_key=key, option=dict_new,
base_option=element)
param_list.append(hyper_option)
else:
param_list = get_param_list(value, param_list, superkeys + [key])
return param_list
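# Example (illustrative): for a hyperparameter file such as {"default_model": {"depth": [2, 3]}},
# get_param_list(config_hyper, [], []) returns two HyperparameterOption objects, both with
# base_key "depth" and with option {"default_model": {"depth": 2}} and {"default_model": {"depth": 3}}.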
def update_dict(d, u, base_key):
"""Update a given dictionary recursively with a new sub-dictionary.
Example 1:
.. code-block:: python
d = {
'foo': {
'bar': 'some example text',
'baz': {'zag': 5}
}
}
u = {'foo': {'baz': {'zag': 7}}}
base_key = 'zag'
>>> print(update_dict(d, u, base_key))
{
'foo': {
'bar': 'some example text',
'baz': {'zag': 7}
}
}
Example 2:
.. code-block:: python
d = {
'foo': {
'bar': 'some example text',
'baz': {'zag': 5}
}
}
u = {'foo': {'baz': {'zag': 7}}}
base_key = 'foo'
>>> print(update_dict(d, u, base_key))
{
'foo': {
'baz': {'zag': 7}
}
}
Args:
d (dict): A dictionary to update.
u (dict): A subdictionary to update the original one with.
base_key (str): the string indicating which level to update.
Returns:
dict: An updated dictionary.
"""
for k, v in u.items():
if k == base_key:
d[k] = v
elif isinstance(v, collections.abc.Mapping):
d[k] = update_dict(d.get(k, {}), v, base_key)
else:
d[k] = v
return d
def keys_are_unique(hyperparam_list):
"""Check if the ``base_keys`` in a list of ``HyperparameterOption`` objects are unique.
Args:
hyperparam_list (list)(HyperparameterOption): a list of hyperparameter options.
Returns:
bool: True if all the ``base_keys`` are unique, otherwise False.
"""
keys = [item.base_key for item in hyperparam_list]
keys = set(keys)
return len(keys) == len(hyperparam_list)
def get_base_keys(hyperparam_list):
"""Get a list of base_keys from a param_list.
Args:
hyperparam_list (list)(HyperparameterOption): a list of hyperparameter options.
Returns:
base_keys (list)(str): a list of base_keys.
"""
base_keys_all = [hyper_option.base_key for hyper_option in hyperparam_list]
base_keys = []
for base_key in base_keys_all:
if base_key not in base_keys:
base_keys.append(base_key)
return base_keys
def format_results(results_df, config_list, param_list):
"""Merge config and results in a df."""
config_df = pd.DataFrame.from_dict(config_list)
keep = list(set([list(hyper_option.option.keys())[0] for hyper_option in param_list]))
keep.append(ConfigKW.PATH_OUTPUT)
config_df = config_df[keep]
results_df = config_df.set_index(ConfigKW.PATH_OUTPUT).join(results_df.set_index(ConfigKW.PATH_OUTPUT))
results_df = results_df.reset_index()
results_df = results_df.sort_values(by=['best_validation_loss'])
return results_df
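# Illustrative output: for a sweep over "default_model", format_results yields one row per config
# with columns such as
#     path_output, default_model, best_training_dice, best_training_loss,
#     best_validation_dice, best_validation_loss
# sorted by ascending best_validation_loss.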
def automate_training(file_config, file_config_hyper, fixed_split, all_combin, path_data=None,
n_iterations=1, run_test=False, all_logs=False, thr_increment=None,
multi_params=False, output_dir=None, plot_comparison=False):
"""Automate multiple training processes on multiple GPUs.
Hyperparameter optimization of models is tedious and time-consuming. This function automatizes
this optimization across multiple GPUs. It runs trainings, on the same training and validation
datasets, by combining a given set of parameters and set of values for each of these parameters.
Results are collected for each combination and reported into a dataframe to allow their
comparison. The script efficiently allocates each training to one of the available GPUs.
Usage Example::
ivadomed_automate_training -c config.json -ch config_hyper.json -n n_iterations
.. csv-table:: Example of dataframe
:file: ../../images/detailed_results.csv
Config File:
The config file is the standard config file used in ``ivadomed`` functions. We use this
as the basis. We call a key of this config file a ``category``. In the example below,
we would say that ``training_parameters``, ``default_model``, and ``path_output`` are
``categories``.
.. code-block:: JSON
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/"
}
Hyperparameter Config File:
The hyperparameter config file should have the same layout as the config file. To select
a hyperparameter you would like to vary, just list the different options under the
appropriate key, which we call the ``base_key``. In the example below, we want to vary the
``loss``, ``depth``, and ``model_name``; these are our 3 ``base_keys``. As you can see,
we have listed our different options for these keys. For ``depth``, we have listed
``2``, ``3``, and ``4`` as our different options.
How we implement this depends on 3 settings: ``all_combin``, ``multi_param``,
or the default.
.. code-block:: JSON
{
"training_parameters": {
"loss": [
{"name": "DiceLoss"},
{"name": "FocalLoss", "gamma": 0.2, "alpha" : 0.5}
],
},
"default_model": {"depth": [2, 3, 4]},
"model_name": ["seg_sc_t2star", "find_disc_t1"]
}
Default:
The default option is to change only one parameter at a time relative to the base
config file. We then create a list of config options, called ``config_list``.
Using the examples above, we would have ``2 + 2 + 3 = 7`` different config options:
.. code-block:: python
config_list = [
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'DiceLoss'}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "FocalLoss", "gamma": 0.2, "alpha": 0.5}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 3
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-loss={'name': 'FocalLoss', 'gamma': 0.2, 'alpha': 0.5}"
},
{
"training_parameters": {
"batch_size": 18,
"loss": {"name": "DiceLoss"}
},
"default_model": {
"name": "Unet",
"dropout_rate": 0.3,
"depth": 2
},
"model_name": "seg_tumor_t2",
"path_output": "./tmp/-depth=2"
},
# etc ...
]
All Combinations:
If we select the ``all_combin`` option, we will create a list of configuration options
combinatorically. Using the config examples above, we would have ``2 * 3 * 2 = 12``
different config options. I'm not going to write out the whole ``config_list`` because it's
quite long, but here are the combinations:
.. code-block::
loss = DiceLoss, depth = 2, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 2, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 3, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 3, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 4, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 4, model_name = "seg_sc_t2star"
loss = DiceLoss, depth = 2, model_name = "find_disc_t1"
loss = FocalLoss, depth = 2, model_name = "find_disc_t1"
loss = DiceLoss, depth = 3, model_name = "find_disc_t1"
loss = FocalLoss, depth = 3, model_name = "find_disc_t1"
loss = DiceLoss, depth = 4, model_name = "find_disc_t1"
loss = FocalLoss, depth = 4, model_name = "find_disc_t1"
Multiple Parameters:
The ``multi_params`` option entails changing all the first elements from the list,
then all the second parameters from the list, etc. If the lists are different lengths,
we will just use the first ``n`` elements. In our example above, the lists are of length
2 or 3, so we will only use the first 2 elements:
.. code-block::
loss = DiceLoss, depth = 2, model_name = "seg_sc_t2star"
loss = FocalLoss, depth = 3, model_name = "find_disc_t1"
Args:
file_config (string): Configuration filename, which is used as skeleton to configure the
training. This is the standard config file used in ``ivadomed`` functions. In the
code, we call the keys from this config file ``categories``.
Flag: ``--config``, ``-c``
file_config_hyper (string): json file containing parameters configurations to compare.
Parameter "keys" of this file need to match the parameter "keys" of `config` file.
Parameter "values" are in a list. Flag: ``--config-hyper``, ``-ch``
Example::
{"default_model": {"depth": [2, 3, 4]}}
fixed_split (bool): If True, all the experiments are run on the same
training/validation/testing subdatasets. Flag: ``--fixed-split``
all_combin (bool): If True, all parameters combinations are run. Flag: ``--all-combin``
n_iterations (int): Controls the number of times that each experiment (i.e. set of parameters)
is run. Flag: ``--n-iteration``, ``-n``
run_test (bool): If True, the trained model is also run on the testing subdataset and violin plots
are displayed with the Dice scores for each new output folder created.
Flag: ``--run-test``
all_logs (bool): If True, all the log directories are kept for every iteration.
Flag: ``--all-logs``, ``-l``
thr_increment (float): A threshold analysis is performed at the end of the training
using the trained model and the validation sub-dataset to find the optimal binarization
threshold. The specified value indicates the increment between 0 and 1 used during the
ROC analysis (e.g. 0.1). Flag: ``-t``, ``--thr-increment``
multi_params (bool): If True, more than one parameter will be changed at a time from
the hyperparameters. All the first elements from the hyperparameters lists will be
applied, then all the second elements, etc.
output_dir (str): Path to where the results will be saved.
"""
if output_dir and not Path(output_dir).exists():
Path(output_dir).mkdir(parents=True)
if not output_dir:
output_dir = ""
# Load initial config
initial_config = imed_config_manager.ConfigurationManager(file_config).get_config()
if path_data is not None:
initial_config[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.PATH_DATA] = path_data
# Split dataset if not already done
if fixed_split and (initial_config.get(ConfigKW.SPLIT_PATH) is None):
initial_config = split_dataset(initial_config)
# Hyperparameters values to experiment
with Path(file_config_hyper).open(mode="r") as fhandle:
config_hyper = json.load(fhandle)
param_list = get_param_list(config_hyper, [], [])
config_list = make_config_list(param_list, initial_config, all_combin, multi_params)
# CUDA problem when forking process
# https://github.com/pytorch/pytorch/issues/2517
ctx = mp.get_context("spawn")
# Run all configs on a separate process, with a maximum of n_gpus processes at a given time
logger.info(initial_config[ConfigKW.GPU_IDS])
results_df = pd.DataFrame()
eval_df = pd.DataFrame()
all_mean = pd.DataFrame()
with ctx.Pool(processes=len(initial_config[ConfigKW.GPU_IDS])) as pool:
for i in range(n_iterations):
if not fixed_split:
# Set seed for iteration
seed = random.randint(1, 10001)
for config in config_list:
config[ConfigKW.SPLIT_DATASET][SplitDatasetKW.RANDOM_SEED] = seed
if all_logs:
if i:
config[ConfigKW.PATH_OUTPUT] = config[ConfigKW.PATH_OUTPUT].replace("_n=" + str(i - 1).zfill(2),
"_n=" + str(i).zfill(2))
else:
config[ConfigKW.PATH_OUTPUT] += "_n=" + str(i).zfill(2)
validation_scores = pool.map(partial(train_worker, thr_incr=thr_increment), config_list)
val_df = pd.DataFrame(validation_scores, columns=[
'path_output', 'best_training_dice', 'best_training_loss', 'best_validation_dice',
'best_validation_loss'])
if run_test:
new_config_list = []
for config in config_list:
# Delete path_pred
path_pred = Path(config['path_output'], 'pred_masks')
if path_pred.is_dir() and n_iterations > 1:
try:
shutil.rmtree(str(path_pred))
except OSError as e:
logger.info(f"Error: {e.filename} - {e.strerror}.")
# Take the config file within the path_output because binarize_prediction may have been updated
json_path = Path(config[ConfigKW.PATH_OUTPUT], 'config_file.json')
new_config = imed_config_manager.ConfigurationManager(str(json_path)).get_config()
new_config[ConfigKW.GPU_IDS] = config[ConfigKW.GPU_IDS]
new_config_list.append(new_config)
test_results = pool.map(test_worker, new_config_list)
df_lst = []
# Merge all eval df together to have a single excel file
for j, result in enumerate(test_results):
df = result[-1]
if i == 0:
all_mean = df.mean(axis=0)
std_metrics = df.std(axis=0)
metrics = pd.concat([all_mean, std_metrics], sort=False, axis=1)
else:
all_mean = pd.concat([all_mean, df.mean(axis=0)], sort=False, axis=1)
mean_metrics = all_mean.mean(axis=1)
std_metrics = all_mean.std(axis=1)
metrics = pd.concat([mean_metrics, std_metrics], sort=False, axis=1)
metrics.rename({0: "mean"}, axis=1, inplace=True)
metrics.rename({1: "std"}, axis=1, inplace=True)
id = result[0].split("_n=")[0]
cols = metrics.columns.values
for idx, col in enumerate(cols):
metrics.rename({col: col + "_" + id}, axis=1, inplace=True)
df_lst.append(metrics)
test_results[j] = result[:2]
# Init or add eval results to dataframe
eval_df = pd.concat(df_lst, sort=False, axis=1)
test_df = | pd.DataFrame(test_results, columns=['path_output', 'test_dice']) | pandas.DataFrame |
from typing import List
import pandas as pd
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from pydantic import BaseModel, Field
from toucan_connectors.common import nosql_apply_parameters_to_query
from toucan_connectors.google_credentials import GoogleCredentials
from toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource
API = 'analyticsreporting'
SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
VERSION = 'v4'
class Dimension(BaseModel):
name: str
histogramBuckets: List[str] = None
class DimensionFilter(BaseModel):
dimensionName: str
operator: str
expressions: List[str] = None
caseSensitive: bool = False
class Config:
# TODO `not` param is not implemented
extra = 'allow'
class DimensionFilterClause(BaseModel):
operator: str
filters: List[DimensionFilter]
class DateRange(BaseModel):
startDate: str
endDate: str
class Metric(BaseModel):
expression: str
alias: str = None
class Config:
# TODO `metricType` param is not implemented
extra = 'allow'
class MetricFilter(BaseModel):
metricName: str
operator: str
comparisonValue: str
class Config:
# TODO `not` param is not implemented
extra = 'allow'
class MetricFilterClause(BaseModel):
operator: str
filters: List[MetricFilter]
class OrderBy(BaseModel):
fieldName: str
orderType: str = None
sortOrder: str = None
class Pivot(BaseModel):
dimensions: List[Dimension] = None
dimensionFilterClauses: List[DimensionFilterClause] = None
metrics: List[Metric] = None
startGroup: int = None
maxGroupCount: int = None
class Cohort(BaseModel):
name: str
type: str
dateRange: DateRange = None
class CohortGroup(BaseModel):
cohorts: List[Cohort]
lifetimeValue: bool = False
class Segment(BaseModel):
segmentId: str = None
# TODO dynamicSegment: DynamicSegment
class ReportRequest(BaseModel):
viewId: str
dateRanges: List[DateRange] = None
samplingLevel: str = None
dimensions: List[Dimension] = None
dimensionFilterClauses: List[DimensionFilterClause] = None
metrics: List[Metric] = None
metricFilterClauses: List[MetricFilterClause] = None
filtersExpression: str = ''
orderBys: List[OrderBy] = []
segments: List[Segment] = []
pivots: List[Pivot] = None
cohortGroup: CohortGroup = None
pageToken: str = ''
pageSize: int = 10000
includeEmptyRows: bool = False
hideTotals: bool = False
hideValueRanges: bool = False
def get_dict_from_response(report, request_date_ranges):
columnHeader = report.get('columnHeader', {})
dimensionHeaders = columnHeader.get('dimensions', [])
metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
rows = report.get('data', {}).get('rows', [])
all_rows = []
for row_index, row in enumerate(rows):
dimensions = row.get('dimensions', [])
dateRangeValues = row.get('metrics', [])
for i, values in enumerate(dateRangeValues):
for metricHeader, value in zip(metricHeaders, values.get('values')):
row_dict = {
'row_index': row_index,
'date_range_id': i,
'metric_name': metricHeader.get('name'),
}
if request_date_ranges and (len(request_date_ranges) >= i):
row_dict['start_date'] = request_date_ranges[i].startDate
row_dict['end_date'] = request_date_ranges[i].endDate
if metricHeader.get('type') == 'INTEGER':
row_dict['metric_value'] = int(value)
elif metricHeader.get('type') == 'FLOAT':
row_dict['metric_value'] = float(value)
else:
row_dict['metric_value'] = value
for dimension_name, dimension_value in zip(dimensionHeaders, dimensions):
row_dict[dimension_name] = dimension_value
all_rows.append(row_dict)
return all_rows
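# Illustrative sketch (not part of the connector): given a minimal report payload shaped
# like an assumed Google Analytics Reporting API v4 response, get_dict_from_response
# flattens it into one dict per (row, date range, metric) combination:
#     report = {
#         "columnHeader": {
#             "dimensions": ["ga:country"],
#             "metricHeader": {"metricHeaderEntries": [{"name": "ga:sessions", "type": "INTEGER"}]},
#         },
#         "data": {"rows": [{"dimensions": ["France"], "metrics": [{"values": ["12"]}]}]},
#     }
#     date_ranges = [DateRange(startDate="2020-01-01", endDate="2020-01-31")]
#     get_dict_from_response(report, date_ranges)
#     # -> [{'row_index': 0, 'date_range_id': 0, 'metric_name': 'ga:sessions',
#     #      'start_date': '2020-01-01', 'end_date': '2020-01-31',
#     #      'metric_value': 12, 'ga:country': 'France'}]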
def get_query_results(service, report_request):
response = service.reports().batchGet(body={'reportRequests': report_request.dict()}).execute()
return response.get('reports', [])[0]
class GoogleAnalyticsDataSource(ToucanDataSource):
report_request: ReportRequest = Field(
...,
title='Report request',
description='See the complete '
'<a href="https://developers.google.com/analytics/devguides/reporting/core/v4/rest/v4/reports/batchGet#reportrequest" target="_blank">Google documentation</a>',
)
class GoogleAnalyticsConnector(ToucanConnector):
data_source_model: GoogleAnalyticsDataSource
credentials: GoogleCredentials = Field(
...,
title='Google Credentials',
description='For authentication, download an authentication file from your '
'<a href="https://console.developers.google.com/apis/credentials" target="_blank">Google Console</a> '
'and use the values here. This is an oauth2 credential file. For more information see this '
'<a href="https://gspread.readthedocs.io/en/latest/oauth2.html" target="_blank">documentation</a>. '
'You should use "service_account" credentials, which is the preferred type of credentials '
'to use when authenticating on behalf of a service or application',
)
scope: List[str] = Field(
[SCOPE],
description='OAuth 2.0 scopes define the level of access you need to '
'request the Google APIs. For more information, see this '
'<a href="https://developers.google.com/identity/protocols/googlescopes" target="_blank">documentation</a>',
)
def _retrieve_data(self, data_source: GoogleAnalyticsDataSource) -> pd.DataFrame:
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
self.credentials.dict(), self.scope
)
service = build(API, VERSION, credentials=credentials)
report_request = ReportRequest(
**nosql_apply_parameters_to_query(
data_source.report_request.dict(), data_source.parameters
)
)
report = get_query_results(service, report_request)
reports_data = [pd.DataFrame(get_dict_from_response(report, report_request.dateRanges))]
while 'nextPageToken' in report:
report_request.pageToken = report['nextPageToken']
report = get_query_results(service, report_request)
reports_data.append(
pd.DataFrame(get_dict_from_response(report, report_request.dateRanges))
)
return | pd.concat(reports_data) | pandas.concat |
# +
import numpy as np
import pandas as pd
from .plot_spatial import plot_spatial_general as plot_spatial
def interpolate_coord(start=10, end=5, steps=100, accel_power=3, accelerate=True, jitter=None):
r"""
Interpolate coordinates between `start` and `end` positions in `steps` steps
with non-linearity in movement according to `accel_power`,
and accelerate the change in coordinates (True) or slow it down (False).
:param jitter: shift positions by a random amount by sampling
new_coord = np.random.normal(loc=coord, scale=jitter * abs(coord)); reasonable values 0.01-0.1
"""
seq = np.linspace(np.zeros_like(start), np.ones_like(end), steps)
seq = seq ** accel_power
if jitter is not None:
seq = np.random.normal(loc=seq, scale=jitter * np.abs(seq))
seq[0] = np.zeros_like(start)
seq[steps - 1] = np.ones_like(end)
if accelerate:
seq = 1 - seq
seq = seq * (start - end) + end
if not accelerate:
seq = np.flip(seq, axis=0)
return seq
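# Minimal usage sketch (illustrative only): interpolate 2D point coordinates from
# UMAP-like positions to spot-like positions over 5 frames. `start` and `end` can be
# (n_points, 2) arrays thanks to numpy broadcasting.
#     start = np.array([[0.0, 0.0], [1.0, 1.0]])
#     end = np.array([[10.0, 5.0], [20.0, 15.0]])
#     frames = interpolate_coord(start=start, end=end, steps=5, accel_power=3, accelerate=True)
#     frames.shape  # (5, 2, 2); frames[0] equals `start` and frames[-1] equals `end`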
def expand_1by1(df):
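"""Expand an (n x k) dataframe into an (n*k x k) dataframe made of k stacked blocks,
where block i keeps only the values of column i (all other entries are 0) and the row
labels of block i get the suffix str(i) appended.
"""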
col6 = [df.copy() for i in range(df.shape[1])]
index = df.index.astype(str)
columns = df.columns
for i in range(len(col6)):
col6_1 = col6[i]
col6_1_new = np.zeros_like(col6_1)
col6_1_new[:, i] = col6_1[col6_1.columns[i]].values
col6_1_new = pd.DataFrame(col6_1_new, index=index + str(i), columns=columns)
col6[i] = col6_1_new
return pd.concat(col6, axis=0)
def plot_video_mapping(
adata_vis,
adata,
sample_ids,
spot_factors_df,
sel_clust,
sel_clust_col,
sample_id,
sc_img=None,
sp_img=None,
sp_img_scaling_fac=1,
adata_cluster_col="annotation_1",
cell_fact_df=None,
step_n=[20, 100, 15, 45, 80, 30],
step_quantile=[1, 1, 1, 1, 0.95, 0.95],
sc_point_size=1,
aver_point_size=20,
sp_point_size=5,
reorder_cmap=range(7),
label_clusters=False,
style="dark_background",
adjust_text=False,
sc_alpha=0.6,
sp_alpha=0.8,
img_alpha=0.8,
sc_power=20,
sp_power=20,
sc_accel_power=3,
sp_accel_power=3,
sc_accel_decel=True,
sp_accel_decel=False,
sc_jitter=None,
sp_jitter=None,
save_path="./results/mouse_viseum_snrna/std_model/mapping_video/",
crop_x=None,
crop_y=None,
save_extension="png",
colorbar_shape={"vertical_gaps": 2, "horizontal_gaps": 0.13},
):
r"""
Create frames for a video illustrating the approach from UMAP of single cells to their spatial locations.
We use linear interpolation of UMAP and spot coordinates to create movement.
:param adata_vis: anndata with Visium data (including spatial slot in `.obsm`)
:param adata: anndata with single cell data (including X_umap slot in `.obsm`)
:param sample_ids: pd.Series - sample ID for each spot
:param spot_factors_df: output of the model showing spatial expression of cell types / factors.
:param sel_clust: selected cluster names in `adata_cluster_col` column of adata.obs
:param sel_clust_col: selected cluster column name in spot_factors_df
:param sample_id: sample id to use for visualisation
:param adata_cluster_col: column in adata.obs containing cluster annotations
:param cell_fact_df: alternative to adata_cluster_col, pd.DataFrame specifying class for each cell (can be continuous).
:param step_n: how many frames to record in each step: UMAP, UMAP collapsing into averages, averages, averages expanding into locations, locations.
:param step_quantile: how to choose maximum colorscale limit in each step? (quantile) Use 1 for discrete values.
:param sc_point_size: point size for cells
:param aver_point_size: point size for averages
:param sp_point_size: point size for spots
:param fontsize: size of text label of averages
:param adjust_text: adjust text label position to avoid overlaps
:param sc_alpha, sp_alpha: color alpha scaling for single cells and spatial.
:param sc_power, sp_power: change dot size nonlinearly with this exponent
:param sc_accel_power, sp_accel_power: change movement speed size nonlinearly with this exponent
:param sc_accel_decel, sp_accel_decel: accelerate (True) or decelereate (False)
:param save_path: path where to save frames (named according to order of steps)
"""
from tqdm.auto import tqdm
# extract spot expression and coordinates
coords = adata_vis.obsm["spatial"].copy() * sp_img_scaling_fac
s_ind = sample_ids.isin([sample_id])
sel_clust_df = spot_factors_df.loc[s_ind, sel_clust_col]
sel_coords = coords[s_ind, :]
sample_id = sample_ids[s_ind]
if sc_img is None:
# create a black background image
xy = sel_coords.max(0) + sel_coords.max(0) * 0.05
sc_img = np.zeros((int(xy[1]), int(xy[0]), 3))
if sp_img is None:
# create a black background image
xy = sel_coords.max(0) + sel_coords.max(0) * 0.05
sp_img = np.zeros((int(xy[1]), int(xy[0]), 3))
img_alpha = 1
img_alpha_seq = 1
else:
img_alpha_seq = interpolate_coord(
start=0, end=img_alpha, steps=step_n[3] + 1, accel_power=sc_power, accelerate=True, jitter=None
)
# extract umap coordinates
umap_coord = adata.obsm["X_umap"].copy()
# make positive and rescale to fill the image
umap_coord[:, 0] = umap_coord[:, 0] + abs(umap_coord[:, 0].min()) + abs(umap_coord[:, 0].max()) * 0.01
umap_coord[:, 1] = -umap_coord[:, 1] # flip y axis
umap_coord[:, 1] = umap_coord[:, 1] + abs(umap_coord[:, 1].min()) + abs(umap_coord[:, 1].max()) * 0.01
if crop_x is None:
img_width = sc_img.shape[0] * 0.99
x_offset = 0
umap_coord[:, 0] = umap_coord[:, 0] / umap_coord[:, 0].max() * img_width
else:
img_width = abs(crop_x[0] - crop_x[1]) * 0.99
x_offset = np.array(crop_x).min()
umap_coord[:, 0] = umap_coord[:, 0] / umap_coord[:, 0].max() * img_width
umap_coord[:, 0] = umap_coord[:, 0] + x_offset
if crop_y is None:
img_height = sc_img.shape[1] * 0.99
y_offset = 0
# y_offset2 = 0
umap_coord[:, 1] = umap_coord[:, 1] / umap_coord[:, 1].max() * img_height
else:
img_height = abs(crop_y[0] - crop_y[1]) * 0.99
y_offset = np.array(crop_y).min()
# y_offset2 = sp_img.shape[1] - np.array(crop_y).max()
umap_coord[:, 1] = umap_coord[:, 1] / umap_coord[:, 1].max() * img_height
umap_coord[:, 1] = umap_coord[:, 1] + y_offset
if cell_fact_df is None:
cell_fact_df = pd.get_dummies(adata.obs[adata_cluster_col], columns=[adata_cluster_col])
cell_fact_df = cell_fact_df[sel_clust]
cell_fact_df.columns = cell_fact_df.columns.tolist()
cell_fact_df["other"] = (cell_fact_df.sum(1) == 0).astype(np.int64)
# compute average position weighted by cell density
aver_coord = pd.DataFrame()
for c in cell_fact_df.columns:
dens = cell_fact_df[c].values
dens = dens / dens.sum(0)
aver = np.array((umap_coord * dens.reshape((cell_fact_df.shape[0], 1))).sum(0))
aver_coord_1 = pd.DataFrame(aver.reshape((1, 2)), index=[c], columns=["x", "y"])
aver_coord_1["column"] = c
aver_coord = | pd.concat([aver_coord, aver_coord_1]) | pandas.concat |
from __future__ import annotations
from dataclasses import dataclass, field
from fractions import Fraction
from typing import List, Union, Iterable
import numpy as np
import pandas as pd
from reamber.base.RAConst import RAConst
MAX_DENOMINATOR = 100
@dataclass
class BpmChangeSnap:
bpm: float
measure: int
beat: int
slot: Union[Fraction, float]
beats_per_measure: Union[Fraction, float]
@property
def beat_length(self) -> float:
return RAConst.MIN_TO_MSEC / self.bpm
@dataclass
class BpmChangeOffset:
bpm: float
beats_per_measure: Union[Fraction, float]
offset: float
@property
def beat_length(self) -> float:
return RAConst.MIN_TO_MSEC / self.bpm
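# Example (assuming RAConst.MIN_TO_MSEC == 60_000): at 120 BPM,
# BpmChangeOffset(bpm=120, beats_per_measure=4, offset=0.0).beat_length == 500.0 (ms per beat).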
@dataclass
class BpmChange:
bpm: float
beats_per_measure: Union[Fraction, float]
offset: float
measure: int
beat: int
slot: Fraction
@property
def beat_length(self) -> float:
return RAConst.MIN_TO_MSEC / self.bpm
class TimingMap:
initial_offset: float
bpm_changes: List[BpmChange] = field(default_factory=list)
slotter: TimingMap.Slotter
prev_divisions: tuple
def __init__(self,
initial_offset: float,
bpm_changes: List[BpmChange]):
self.initial_offset = initial_offset
self.bpm_changes = bpm_changes
# noinspection PyTypeChecker
self.slotter = None
@staticmethod
def time_by_offset(initial_offset: float,
bpm_changes_offset: List[BpmChangeOffset]) -> TimingMap:
bpm_changes_offset.sort(key=lambda x: x.offset)
bpm_changes_snap = []
curr_measure = 0
for i, j in zip(bpm_changes_offset[:-1], bpm_changes_offset[1:]):
diff_offset = j.offset - i.offset
diff_beat = Fraction(diff_offset / i.beat_length).limit_denominator(100)
""" 3 cases
1) No Change
2) J is in same measure
3) J is in different measure
"""
if diff_beat % i.beats_per_measure == 0:
# Case 1
bpm_changes_snap.append(BpmChangeSnap(bpm=i.bpm,
beats_per_measure=i.beats_per_measure,
measure=curr_measure,
beat=0,
slot=Fraction(0)))
curr_measure += int(diff_beat // i.beats_per_measure)
elif diff_beat < i.beats_per_measure:
# Case 2
bpm_changes_snap.append(BpmChangeSnap(bpm=i.bpm,
beats_per_measure=Fraction(diff_beat)
.limit_denominator(MAX_DENOMINATOR),
measure=curr_measure,
beat=0,
slot=Fraction(0)))
curr_measure += 1
else:
# Case 3
# We append the original first
bpm_changes_snap.append(BpmChangeSnap(bpm=i.bpm,
beats_per_measure=i.beats_per_measure,
measure=curr_measure,
beat=0,
slot=Fraction(0)))
curr_measure += int(diff_beat // i.beats_per_measure)
# Then we append the corrector
beats_per_measure = Fraction(diff_beat % i.beats_per_measure).limit_denominator(MAX_DENOMINATOR)
if beats_per_measure:
bpm_changes_snap.append(BpmChangeSnap(bpm=i.bpm,
beats_per_measure=beats_per_measure,
measure=curr_measure,
beat=0,
slot=Fraction(0)))
curr_measure += 1
# This algorithm pivots on the snap algorithm.
bpm_changes_snap.append(BpmChangeSnap(bpm=bpm_changes_offset[-1].bpm,
beats_per_measure=bpm_changes_offset[-1].beats_per_measure,
measure=curr_measure,
beat=0,
slot=Fraction(0)))
tm = TimingMap.time_by_snap(initial_offset=initial_offset,
bpm_changes_snap=bpm_changes_snap)
# tm._force_bpm_measure()
return tm
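# Illustrative usage sketch (comments only, not executed): two offset-based BPM changes,
# 120 BPM in 4/4 starting at 0 ms and 60 BPM starting at 2000 ms, give a map whose second
# measure starts exactly at the second BPM change:
#     tm = TimingMap.time_by_offset(
#         initial_offset=0.0,
#         bpm_changes_offset=[
#             BpmChangeOffset(bpm=120, beats_per_measure=4, offset=0.0),
#             BpmChangeOffset(bpm=60, beats_per_measure=4, offset=2000.0),
#         ],
#     )
#     tm.offsets(measures=[1], beats=[0], slots=[Fraction(0)])  # -> [2000.0]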
@staticmethod
def time_by_snap(initial_offset,
bpm_changes_snap: List[BpmChangeSnap]) -> TimingMap:
""" Creates a Timing Map using the BPM Changes provided.
The first BPM Change MUST be on Measure, Beat, Slot 0.
:param initial_offset: The offset of the first measure.
:param bpm_changes_snap: A List of BPM Changes of BpmChangeSnap Class.
:return: A TimingMap constructed from the given snap-based BPM changes.
"""
bpm_changes_snap.sort(key=lambda x: (x.measure, x.beat, x.slot))
beats_per_measure = TimingMap._beats_per_measure_snap(bpm_changes_snap)
initial = bpm_changes_snap[0]
assert initial.measure == 0 and \
initial.beat == 0 and \
initial.slot == 0,\
f"The first bpm must be on Measure 0, Beat 0, Slot 0. " \
f"It is now {bpm_changes_snap[0].measure}, {bpm_changes_snap[0].beat}, {bpm_changes_snap[0].slot}"
bpm_changes = [BpmChange(initial.bpm, initial.beats_per_measure, initial_offset,
0, 0, Fraction(0))]
prev_offset = initial_offset
prev_bpm = bpm_changes_snap[0].bpm
prev_beat = Fraction(0)
prev_slot = Fraction(0)
prev_measure = 0
for bpm in bpm_changes_snap[1:]:
measure = bpm.measure
beat = bpm.beat
slot = bpm.slot
"""
0 1
[---|---] [---|---|---]
[---|---|---|---]
<-A-><-------B-------><---C--->
"""
# This is the A, C buffer
# <---------------------------------A-----------------------------> + <----C---->
diff_beats = beats_per_measure[prev_measure] - prev_beat - 1 + (1 - prev_slot) + beat + slot
for i in range(prev_measure + 1, measure):
# This is the B buffer
diff_beats += beats_per_measure[i]
for i in range(measure, prev_measure + 1):
# This is the inverse B buffer
# This happens when the measures are the same, so this corrects the above formula.
diff_beats -= beats_per_measure[i]
offset = prev_offset + diff_beats * RAConst.MIN_TO_MSEC / prev_bpm
bpm_changes.append(BpmChange(bpm.bpm, bpm.beats_per_measure, offset, bpm.measure, bpm.beat, bpm.slot))
prev_measure = measure
prev_offset = offset
prev_bpm = bpm.bpm
prev_beat = beat
prev_slot = slot
tm = TimingMap(initial_offset=initial_offset,
bpm_changes=bpm_changes)
return tm
@staticmethod
def _beats_per_measure_snap(bpm_changes_snap: List[BpmChangeSnap]):
""" This function simulates the beats_per_measure and generates a list of beats per measure
used for timing_by_snap. """
prev_beats = bpm_changes_snap[0].beats_per_measure
prev_measure = 0
beats_per_measure = []
# We process the number of beats first
for b in bpm_changes_snap[1:]:
# Note that beat changes can only happen on measures, which makes sense logically.
measure = b.measure
beats = b.beats_per_measure
# For each difference in measure, we append the beats
diff_measure = measure - prev_measure
for _ in range(diff_measure):
beats_per_measure.append(prev_beats)
prev_beats = beats
prev_measure = measure
# If last, we push the last beat change
beats_per_measure.append(prev_beats)
return beats_per_measure
def _force_bpm_measure(self):
""" This function forces the bpms to be on measures, this is better supported on all VSRGs.
However, this will irreversibly remove all BPM sub-measure BPM Changes.
All note data will be re-snapped.
The algorithm has 3 parts.
1) Group By Measure
2) Add Beat 0, Slot 0s and Calculate Offset
3) Force Measures
"""
# Group By Measures
measures = {} # Measure: {BPMs}
for b in self.bpm_changes:
if b.measure not in measures.keys():
measures[b.measure] = [b]
else:
measures[b.measure].append(b)
prev_bpm = None
# Here, we make sure that every measure with a bpm change has a beat=0, slot=0
for e, bpms in enumerate(measures.values()):
if bpms[0].beat != 0 or bpms[0].slot != 0:
diff_beat = (bpms[0].measure - prev_bpm.measure - 1) * prev_bpm.beats_per_measure + \
(prev_bpm.beats_per_measure - prev_bpm.beat - prev_bpm.slot)
bpms.insert(0, BpmChange(bpm=prev_bpm.bpm,
beats_per_measure=bpms[0].beats_per_measure,
offset=diff_beat * prev_bpm.beat_length + prev_bpm.offset,
measure=bpms[0].measure,
beat=0,
slot=Fraction(0)))
prev_bpm = bpms[-1]
# Separate into measures
measure_push = 0
for m, bpms in measures.items():
bpms: List[BpmChange]
beats_per_measure = bpms[0].beats_per_measure
prev_beat = beats_per_measure
for b in bpms:
b.measure += measure_push
for i in reversed(range(len(bpms))):
# This will be run in reverse (it's easier)
b = bpms[i]
# The "beat" here is including the fraction slot/snap
beat = b.beat + b.slot
diff_beats = prev_beat - beat # The number of beats_per_measure for this
if diff_beats < 1:
b.bpm *= 1 / float(diff_beats)
diff_beats = 1
b.beats_per_measure = diff_beats
b.beat = 0
b.slot = Fraction(0)
prev_beat = beat
if beat != 0:
measure_push += 1
for j in range(i, len(bpms)):
bpms[j].measure += 1
# Reassign
self.bpm_changes = [i for j in measures.values() for i in j]
def offsets(self,
measures: Union[List, int],
beats: Union[List, int],
slots: Union[List[Fraction], Fraction]) -> List[float]:
""" Finds the offsets in ms for the specified snaps
:param measures: List of Measures or measure, in integers
:param beats: List of Beats or beat in integers
:param slots: List of Slots or slot in Fraction.
:return: List[float]
"""
measures = [measures] if isinstance(measures, int) else measures
beats = [beats] if isinstance(beats, int) else beats
slots = [slots] if isinstance(slots, (Fraction, float, int)) else slots
offsets = []
for measure, beat, slot in zip(measures, beats, slots):
for b in reversed(self.bpm_changes):
if b.measure > measure:
# If the measure is more, it's definitely not it
continue
if b.measure == measure and b.beat + b.slot > beat + slot:
# If the measure is the same, we check if its beat + slot is more
continue
diff_measure = measure - b.measure
diff_beat = beat - b.beat
diff_slot = slot - b.slot
offsets.append(
b.offset +
(diff_measure * b.beats_per_measure + diff_beat + diff_slot) * b.beat_length)
break
return offsets
def snaps(self,
offsets: Union[Iterable[float], float],
divisions: Iterable[int] = (1,2,3,4,5,6,7,8,9,10,12,16,32,48,64,96),
transpose: bool = False) -> Union[List[list], List[tuple]]:
""" Finds the snaps from the provided offsets
:param offsets: Offsets to find snaps for
:param divisions: Divisions for the snap to conform to.
:param transpose: Transposes the returned list
:return: [measures, beats, slots] as three lists, or a list of (measure, beat, slot) tuples if transpose is True
"""
snaps = [[], [], []]
offsets = [offsets] if isinstance(offsets, (int, float)) else offsets
if not self.slotter or self.prev_divisions != divisions:
# noinspection PyTypeChecker
self.prev_divisions = divisions
self.slotter = TimingMap.Slotter(divisions)
# This is required as the TimingMap modulus is prone to rounding errors
# e.g. 3.9999 -> measure 3, beat 4, snap 191/192
# This will correct it to 4.0 without exceeding to snap 1/192
DIVISION_CORRECTION = 0.001
for offset in offsets:
for b in reversed(self.bpm_changes):
if b.offset > offset: continue
diff_offset = offset - b.offset
beats_total = diff_offset / b.beat_length + DIVISION_CORRECTION
measure = int(beats_total // b.beats_per_measure)
beat = int(beats_total - measure * b.beats_per_measure)
slot = self.slotter.slot(beats_total % 1)
snaps[0].append(b.measure + measure)
snaps[1].append(b.beat + beat)
snaps[2].append(b.slot + slot)
break
return list(zip(*snaps)) if transpose else snaps
def snap_objects(self,
offsets: Iterable[float],
objects: Iterable[object]):
a = pd.DataFrame([*self.snaps(offsets), objects]).T
a.columns = ['measure', 'beat', 'slot', 'obj']
a.measure = | pd.to_numeric(a.measure) | pandas.to_numeric |
from context import dero
import dero.data.ff.create.sort as ff_sort
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
import datetime
class DataFrameTest:
df_3_fac = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, .51, 1000),
(10516, 'a', '1/2/2000', 1.02, .52, 2000),
(10516, 'a', '1/3/2000', 1.03, .53, 3000),
(10516, 'a', '1/4/2000', 1.04, .54, 4000),
(10516, 'b', '1/1/2000', 1.05, 1.55, 50000),
(10516, 'b', '1/2/2000', 1.06, 1.56, 60000),
(10516, 'b', '1/3/2000', 1.07, 1.57, 70000),
(10516, 'b', '1/4/2000', 1.08, 1.58, 80000),
(10517, 'a', '1/1/2000', 1.09, .59, 9000),
(10517, 'a', '1/2/2000', 1.10, .60, 10000),
(10517, 'a', '1/3/2000', 1.11, .61, 11000),
(10517, 'a', '1/4/2000', 1.12, .62, 12000),
(10517, 'b', '1/1/2000', 1.13, .63, 13000),
(10517, 'b', '1/2/2000', 1.14, .64, 14000),
(10517, 'b', '1/3/2000', 1.15, .65, 15000),
(10517, 'b', '1/4/2000', 1.16, .66, 16000),
(10518, 'a', '1/1/2000', 1.17, .67, 17000),
(10518, 'a', '1/2/2000', 1.18, .68, 18000),
(10518, 'a', '1/3/2000', 1.19, .69, 19000),
(10518, 'a', '1/4/2000', 1.20, .70, 20000),
(10518, 'b', '1/1/2000', 1.21, .71, 21000),
(10518, 'b', '1/2/2000', 1.22, .72, 22000),
(10518, 'b', '1/3/2000', 1.23, .73, 23000),
(10518, 'b', '1/4/2000', 1.24, .74, 24000),
], columns=['PERMNO', 'byvar', 'Date', 'RET', 'be/me', 'me'])
df_3_fac['Date'] = pd.to_datetime(df_3_fac['Date'])
class TestCalculateFFFactors(DataFrameTest):
def test_create_portfolios(self):
expect_df = pd.DataFrame(data=[
(10516, 'a', Timestamp('2000-01-01 00:00:00'), 1.01, 0.51, 1000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-02 00:00:00'), 1.02, 0.52, 2000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-03 00:00:00'), 1.03, 0.53, 3000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-04 00:00:00'), 1.04, 0.54, 4000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-01 00:00:00'), 1.05, 1.55, 50000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-02 00:00:00'), 1.06, 1.56, 60000, 3, | Timestamp('2000-01-01 00:00:00') | pandas.Timestamp |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
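# For intuition (illustrative, not a test): concatenating the tz-naive and tz-aware
# datetime data above does not keep a datetime dtype but falls back to object, e.g.
#     pd.concat([Series(dt_data), Series(tz_data)], ignore_index=True).dtype == np.dtype("object")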
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = | Series(vals1) | pandas.Series |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
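# The helper above resets the index because the "records" and "values" orients do not
# round-trip it; a quick illustration (not a test):
#     df = DataFrame({"a": [1]}, index=["x"])
#     read_json(df.to_json(orient="records"), orient="records").index  # RangeIndex, not ["x"]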
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = | pd.DataFrame({"test": []}, index=[]) | pandas.DataFrame |
"""
A classification model built with PyTorch, MediaPipe and
OpenCV for live gesture detection from a webcam
"""
# import dependencies
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import mediapipe as mp
import cv2
import time
import os
import traceback
from datetime import datetime
import pickle
import torch
from torch._C import device
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
class GestureDataset(Dataset):
def __init__(self, X_data, y_data):
self.X_data = X_data
self.y_data = y_data
def __len__(self):
return len(self.y_data)
def __getitem__(self, index):
return self.X_data[index], self.y_data[index]
class GestureClassifierNet(nn.Module):
def __init__(self, num_feature, num_class):
super(GestureClassifierNet, self).__init__()
self.layer_1 = nn.Linear(num_feature, 512)
self.layer_2 = nn.Linear(512, 128)
self.layer_3 = nn.Linear(128, 64)
self.layer_out = nn.Linear(64, num_class)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
self.batchnorm1 = nn.BatchNorm1d(512)
self.batchnorm2 = nn.BatchNorm1d(128)
self.batchnorm3 = nn.BatchNorm1d(64)
def forward(self, x):
x = self.layer_1(x)
x = self.batchnorm1(x)
x = self.relu(x)
x = self.layer_2(x)
x = self.batchnorm2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer_3(x)
x = self.batchnorm3(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
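# Quick shape check (illustrative only): assuming 63 hand-landmark features
# (21 landmarks x 3 coordinates) and 5 gesture classes, a batch of 16 rows maps to
# one logit per class:
#     net = GestureClassifierNet(num_feature=63, num_class=5)
#     net.eval()  # eval mode so BatchNorm/Dropout behave deterministically
#     logits = net(torch.randn(16, 63))
#     logits.shape  # torch.Size([16, 5])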
class GestureDetector(object):
def __init__(self, dataset_file="hands-coords.csv"):
"""
Gesture Detector Constructor
:param dataset_file : name of the dataset file located in /data
"""
dataset_path = os.path.join('data', dataset_file)
self.df = pd.read_csv(dataset_path)
# Encode the class
self.le = LabelEncoder()
self.le.fit(self.df['class'])
self.df['class_encoded'] = self.le.transform(self.df['class'])
self.X = self.df.drop(['class', 'class_encoded'], axis=1) # features
self.y = self.df['class_encoded'] # target value
# Data Preprocessing and Scaling fit
self.dataset_setup()
'''
Model Definitions
'''
self.EPOCHS = 100
self.BATCH_SIZE = 16
self.LEARNING_RATE = 0.0007
self.NUM_FEATURES = len(self.X.columns)
self.NUM_CLASSES = max(self.y) + 1
self.accuracy_stats = {
'train': [],
"val": []
}
self.loss_stats = {
'train': [],
"val": []
}
self.test_accuracy = 0
self.test_loss = 0
self.detection_model_name = 'torch-mc' # detected model name
self.detection_model = None # detection model
'''
Other definitions
'''
self.mp_drawing = mp.solutions.drawing_utils # drawing helper
self.mp_hands = mp.solutions.hands # hands solutions
self.hand = self.mp_hands.Hands(max_num_hands=1) # hands construction
self.landmarks = None # hand landmarks
self.image = None # image for detections
self.detecting = False # whether model is detecting
self.gesture_class = None # detected gesture class
self.gesture_prob = None # gesture probs
self.best_prob = None # detected gesture prob
def get_data_summary(self, show=False):
"""
Data summary displayer
:param show: if True, print the summary information in addition to returning it
"""
data_shape = self.df.shape
df_count = self.df.groupby(self.df['class']).count()
train_test_shape = {
"X_train shape": self.X_train.shape,
"y_train shape": self.y_train.shape,
"X_test shape": self.X_test.shape,
"y_test shape": self.y_test.shape
}
if show:
print(f"data shape: {data_shape}")
for key, val in train_test_shape.items():
print(f"\t{key}: {val}")
print(df_count)
return data_shape, train_test_shape, df_count
def dataset_setup(self):
"""
Dataset Preprocessing and Transform
"""
# Divide it to trainval and test splits
self.X_trainval, self.X_test, \
self.y_trainval, self.y_test = train_test_split(
self.X,
self.y,
stratify=self.y,
test_size=0.3,
random_state=69
)
# Split train into train-val
self.X_train, self.X_val, \
self.y_train, self.y_val = train_test_split(
self.X_trainval,
self.y_trainval,
test_size=0.1,
stratify=self.y_trainval,
random_state=21
)
# Scale the data
self.scaler = StandardScaler()
self.X_train = self.scaler.fit_transform(self.X_train)
self.X_val = self.scaler.transform(self.X_val)
self.X_test = self.scaler.transform(self.X_test)
self.X_train, self.y_train = np.array(self.X_train), np.array(self.y_train)
self.X_val, self.y_val = np.array(self.X_val), np.array(self.y_val)
self.X_test, self.y_test = np.array(self.X_test), np.array(self.y_test)
self.train_dataset = GestureDataset(torch.from_numpy(self.X_train).float(), torch.from_numpy(self.y_train).long())
self.val_dataset = GestureDataset(torch.from_numpy(self.X_val).float(), torch.from_numpy(self.y_val).long())
self.test_dataset = GestureDataset(torch.from_numpy(self.X_test).float(), torch.from_numpy(self.y_test).long())
def set_weighted_sampling(self):
"""
Weighted sampling the training datasets
"""
def get_class_distribution(obj, max_num_class):
count_dict = {}
for i in range(max_num_class+1):
count_dict[i] = 0
for i in obj:
count_dict[i] += 1
return count_dict
target_list = []
for _, t in self.train_dataset:
target_list.append(t)
target_list = torch.tensor(target_list)
target_list = target_list[torch.randperm(len(target_list))]
class_count = [i for i in get_class_distribution(self.y_train, int(max(target_list))).values()]
class_weights = 1./torch.tensor(class_count, dtype=torch.float)
self.class_weights_all = class_weights[target_list]
self.weighted_sampler = WeightedRandomSampler(
weights=self.class_weights_all,
num_samples=len(self.class_weights_all),
replacement=True
)
def model_setup(self):
"""
Model and Data Loader Setup
"""
print("MODEL SETUP STARTED")
self.detection_model = GestureClassifierNet(num_feature = self.NUM_FEATURES, num_class = self.NUM_CLASSES)
self.set_weighted_sampling()
self.train_loader = DataLoader(
dataset=self.train_dataset,
batch_size=self.BATCH_SIZE,
sampler=self.weighted_sampler,
drop_last=True
)
self.val_loader = DataLoader(dataset=self.val_dataset, batch_size=1)
self.test_loader = DataLoader(dataset=self.test_dataset, batch_size=1)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.detection_model.parameters(), lr=self.LEARNING_RATE)
print("MODEL SETUP FINISHED")
def multi_acc(self, y_pred, y_test):
"""
Accuracy Function
"""
y_pred_softmax = torch.log_softmax(y_pred, dim = 1)
_, y_pred_tags = torch.max(y_pred_softmax, dim = 1)
correct_pred = (y_pred_tags == y_test).float()
acc = correct_pred.sum() / len(correct_pred)
acc = torch.round(acc * 100)
return acc
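# Illustrative example (not executed): with logits favouring the correct class in both rows,
# accuracy is 100%. `detector` stands for any GestureDetector instance:
#     preds = torch.tensor([[2.0, 0.1], [0.1, 2.0]])
#     labels = torch.tensor([0, 1])
#     detector.multi_acc(preds, labels)  # -> tensor(100.)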
def load_model_from_file(self, from_file=False, file_name=None):
"""
Loads the detection model from file
:param from_file: if True loads from file, if False passes
:param file_name: model's file name
"""
if from_file and file_name is not None:
file_path = os.path.join('models', file_name)
with open(file_path, 'rb') as f:
self.detection_model = pickle.load(f)
print("Model Loaded")
def save_model(self):
"""
Saves the best detection model
"""
now = datetime.now()
current_time = now.strftime("%H-%M-%S-%d-%m-%Y")
file_name = str(self.detection_model_name + "-" + current_time)
file_path = os.path.join('models', file_name)
with open(file_path, 'wb') as f:
pickle.dump(self.detection_model, f)
def train(self):
"""
Sets up the model and trains it over the training and validation dataset
"""
print("Training Started")
# Model Setup
self.model_setup()
# Training
for e in range(self.EPOCHS):
train_epoch_loss = 0
train_epoch_acc = 0
# TRAINING
self.detection_model.train()
for X_train_batch, y_train_batch in self.train_loader:
self.optimizer.zero_grad()
y_train_pred = self.detection_model(X_train_batch)
train_loss = self.criterion(y_train_pred, y_train_batch)
train_acc = self.multi_acc(y_train_pred, y_train_batch)
train_loss.backward()
self.optimizer.step()
train_epoch_loss += train_loss.item()
train_epoch_acc += train_acc.item()
# VALIDATION
with torch.no_grad():
val_epoch_loss = 0
val_epoch_acc = 0
self.detection_model.eval()
for X_val_batch, y_val_batch in self.val_loader:
y_val_pred = self.detection_model(X_val_batch)
val_loss = self.criterion(y_val_pred, y_val_batch)
val_acc = self.multi_acc(y_val_pred, y_val_batch)
val_epoch_loss += val_loss.item()
val_epoch_acc += val_acc.item()
self.loss_stats['train'].append(train_epoch_loss/len(self.train_loader))
self.loss_stats['val'].append(val_epoch_loss/len(self.val_loader))
self.accuracy_stats['train'].append(train_epoch_acc/len(self.train_loader))
self.accuracy_stats['val'].append(val_epoch_acc/len(self.val_loader))
if e%10==0:
print(f'Epoch {e+0:03}: | Train Loss: {train_epoch_loss/len(self.train_loader):.5f} | Val Loss: {val_epoch_loss/len(self.val_loader):.5f} | Train Acc: {train_epoch_acc/len(self.train_loader):.3f}| Val Acc: {val_epoch_acc/len(self.val_loader):.3f}')
print(f"Training Status: SUCCESSFUL")
self.save_model()
def evaluate(self):
"""
Evaluates Model
"""
y_pred_list = []
with torch.no_grad():
test_loss=0
test_acc=0
self.detection_model.eval()
for X_batch, Y_batch in self.test_loader:
y_test_pred = self.detection_model(X_batch)
_, y_pred_tags = torch.max(y_test_pred, dim = 1)
y_pred_list.append(y_pred_tags.cpu().numpy())
test_it_loss = self.criterion(y_test_pred, Y_batch)
test_it_acc = self.multi_acc(y_test_pred, Y_batch)
test_loss += test_it_loss.item()
test_acc += test_it_acc.item()
self.test_loss = (test_loss/len(self.test_loader))
self.test_accuracy = (test_acc/len(self.test_loader))
# Create dataframes
train_val_acc_df = pd.DataFrame.from_dict(self.accuracy_stats).reset_index().melt(id_vars=['index']).rename(columns={"index":"epochs"})
train_val_loss_df = pd.DataFrame.from_dict(self.loss_stats).reset_index().melt(id_vars=['index']).rename(columns={"index":"epochs"})
"""Convert EURECOM data dump file into a train sets.
This exact code is only suitable for text only as it will drop other labels
and will drop duplicate objects that can have different images.
"""
import ast
import os
import numpy as np
import pandas as pd
import langid
from sklearn.model_selection import train_test_split
RANDOM_NUMBER = 621323849
RANDOM_NUMBER2 = 581085259
FNAME = "data/total_post.csv"
COLS = [
"museum",
"text",
"place_country_code",
"time_label",
"technique_group",
"material_group",
"img",
"type_a_group",
]
ALL_COLS = ["obj"] + COLS
LABEL_COLS = [
"place_country_code",
"time_label",
"technique_group",
"material_group",
]
MIN_CHARS = 10
MIN_LABEL_COUNT = 50
def converter_func(x):
if not x:
return np.nan
elif not x.strip():
return np.nan
elif x.strip() == "[nan]":
return np.nan
try:
x = ast.literal_eval(x)
if type(x) == list:
if len(x) == 0:
return np.nan
elif len(x) != 1:
return "OTHER"
else:
return x[0].strip()
return x
except:
return x.strip()
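# Illustrative examples of the converter (assumed behaviour, derived from the code above):
# converter_func("['wood']") -> "wood" (single-element list string)
# converter_func("['wood', 'metal']") -> "OTHER" (multi-label entries are collapsed)
# converter_func("[nan]") -> np.nan (treated as missing)
# converter_func(" plain text ") -> "plain text" (non-list strings are stripped)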
def converter_func_img(x):
if not x:
return np.nan
elif not x.strip():
return np.nan
elif x.strip() == "[nan]":
return np.nan
try:
x = ast.literal_eval(x)
if type(x) == list:
if len(x) == 0:
return np.nan
x = [u.strip() for u in x]
return ",".join(x)
except:
return x.strip()
def merge_text(x):
if not x:
return np.nan
if pd.isna(x):
return np.nan
import json
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
abs_error,
cross_entropy,
explain_prediction,
explain_predictions,
explain_predictions_best_worst
)
from evalml.problem_types import ProblemTypes
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
test_features = [[1], np.ones((15, 1)), pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}).iloc[0],
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame()]
@pytest.mark.parametrize("test_features", test_features)
def test_explain_prediction_value_error(test_features):
with pytest.raises(ValueError, match="features must be stored in a dataframe or datatable with exactly one row."):
explain_prediction(None, input_features=test_features, training_data=None)
explain_prediction_answer = """Feature Name Feature Value Contribution to Prediction
=========================================================
d 40.00 +++++
b 20.00 -----""".splitlines()
explain_prediction_regression_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": None
}]
}
explain_predictions_regression_df_answer = pd.DataFrame({'feature_names': ['d', 'b'],
'feature_values': [40, 20],
'qualitative_explanation': ['+++++', '-----'],
"quantitative_explanation": [None, None]})
explain_prediction_binary_dict_answer = {
"explanations": [{
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": "class_1"
}]
}
explain_prediction_binary_df_answer = pd.DataFrame({
"feature_names": ["d", "b"],
"feature_values": [40, 20],
"qualitative_explanation": ["+++++", "-----"],
"quantitative_explanation": [None, None],
"class_name": ["class_1", "class_1"]
})
explain_prediction_multiclass_answer = """Class: class_0
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++++
c 30.00 ---
Class: class_1
Feature Name Feature Value Contribution to Prediction
=========================================================
a 10.00 +++
b 20.00 ++
Class: class_2
Feature Name Feature Value Contribution to Prediction
=========================================================
c 30.00 ---
d 40.00 ---
""".splitlines()
explain_prediction_multiclass_dict_answer = {
"explanations": [
{"feature_names": ["a", "c"],
"feature_values": [10, 30],
"qualitative_explanation": ["+++++", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_0"},
{"feature_names": ["a", "b"],
"feature_values": [10, 20],
"qualitative_explanation": ["+++", "++"],
"quantitative_explanation": [None, None],
"class_name": "class_1"},
{"feature_names": ["c", "d"],
"feature_values": [30, 40],
"qualitative_explanation": ["---", "---"],
"quantitative_explanation": [None, None],
"class_name": "class_2"},
]
}
explain_prediction_multiclass_df_answer = pd.DataFrame({
"feature_names": ["a", "c", "a", "b", "c", "d"],
"feature_values": [10, 30, 10, 20, 30, 40],
"qualitative_explanation": ["+++++", "---", "+++", "++", "---", "---"],
"quantitative_explanation": [None, None, None, None, None, None],
"class_name": ['class_0', 'class_0', 'class_1', 'class_1', 'class_2', 'class_2']
})
@pytest.mark.parametrize("problem_type, output_format, shap_values, normalized_shap_values, answer",
[(ProblemTypes.REGRESSION,
"text",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_answer),
(ProblemTypes.REGRESSION,
"dict",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_prediction_regression_dict_answer
),
(ProblemTypes.REGRESSION,
"dataframe",
{"a": [1], "b": [-2.1], "c": [-0.25], "d": [2.3]},
{"a": [0.5], "b": [-2.1], "c": [-0.25], "d": [2.3]},
explain_predictions_regression_df_answer
),
(ProblemTypes.BINARY,
"text",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_answer),
(ProblemTypes.BINARY,
"dict",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_dict_answer),
(ProblemTypes.BINARY,
"dataframe",
[{}, {"a": [0.5], "b": [-0.89], "c": [0.33], "d": [0.89]}],
[{}, {"a": [0.5], "b": [-0.89], "c": [-0.25], "d": [0.89]}],
explain_prediction_binary_df_answer),
(ProblemTypes.MULTICLASS,
"text",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_answer),
(ProblemTypes.MULTICLASS,
"dict",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_dict_answer),
(ProblemTypes.MULTICLASS,
"dataframe",
[{}, {}, {}],
[{"a": [1.1], "b": [0.09], "c": [-0.53], "d": [-0.06]},
{"a": [0.53], "b": [0.24], "c": [-0.15], "d": [-0.22]},
{"a": [0.03], "b": [0.02], "c": [-0.42], "d": [-0.47]}],
explain_prediction_multiclass_df_answer)
])
@pytest.mark.parametrize("input_type", ["pd", "ww"])
@patch("evalml.model_understanding.prediction_explanations._user_interface._compute_shap_values")
@patch("evalml.model_understanding.prediction_explanations._user_interface._normalize_shap_values")
def test_explain_prediction(mock_normalize_shap_values,
mock_compute_shap_values,
problem_type, output_format, shap_values, normalized_shap_values, answer,
input_type):
mock_compute_shap_values.return_value = shap_values
mock_normalize_shap_values.return_value = normalized_shap_values
pipeline = MagicMock()
pipeline.problem_type = problem_type
pipeline.classes_ = ["class_0", "class_1", "class_2"]
# By the time we call transform, we are looking at only one row of the input data.
pipeline.compute_estimator_features.return_value = ww.DataTable(pd.DataFrame({"a": [10], "b": [20], "c": [30], "d": [40]}))
features = pd.DataFrame({"a": [1], "b": [2]})
training_data = pd.DataFrame()
if input_type == "ww":
features = ww.DataTable(features)
training_data = ww.DataTable(training_data)
table = explain_prediction(pipeline, features, output_format=output_format, top_k=2, training_data=training_data)
if isinstance(table, str):
compare_two_tables(table.splitlines(), answer)
elif isinstance(table, pd.DataFrame):
pd.testing.assert_frame_equal(table, answer)
else:
assert table == answer
def test_error_metrics():
pd.testing.assert_series_equal(abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), pd.Series([3, 1, 3]))
from pandas import DataFrame, notnull, isnull
from pandapower.topology import create_nxgraph, connected_component
def estimate_voltage_vector(net):
"""
Function initializes the voltage vector of net with a rough estimation. All buses are set to the
slack bus voltage. Transformer differences in magnitude and phase shifting are accounted for.
:param net: pandapower network
:return: pandas dataframe with estimated vm_pu and va_degree
"""
res_bus = DataFrame(index=net.bus.index, columns=["vm_pu", "va_degree"])
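# Rough illustration of the intended result (assumed from the docstring above):
# estimate_voltage_vector(net) returns a DataFrame indexed by net.bus.index with
# columns "vm_pu" and "va_degree", initialised from the slack bus voltage and
# adjusted for transformer ratio and phase shift where applicable.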
import logging
import re
from datetime import datetime as dt
from datetime import timedelta as delta
import exchangelib as ex
import pandas as pd
from exchangelib import (DELEGATE, Account, Configuration, Credentials,
FaultTolerance)
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.config import AZURE_WEB
from smseventlog.utils import fileops as fl
from smseventlog.utils.credentials import CredentialManager
# silence exchangelib naive datetime on last_modified_time info log
logging.getLogger('exchangelib.fields').setLevel(logging.WARNING)
log = getlog(__name__)
class ExchangeAccount():
def __init__(self, gui=False, login=True):
_exch = None
_fldr_root, _wo_folder = None, None
cred_manager = CredentialManager(name='exchange', gui=gui)
f.set_self(vars(), exclude='login')
if login:
self.login()
@property
def exchange(self):
# exchangelib account object
if self._exch is None:
self._exch = self.create_account()
return self._exch
def login(self):
self._exch = self.create_account()
def create_config(self, credentials, m_config=None):
if m_config is None:
# failed once, use hardcoded vals
service_endpoint = 'https://outlook.office365.com/EWS/Exchange.asmx'
auth_type = 'basic'
version = None
else:
service_endpoint = m_config.get('ews_url', None)
auth_type = m_config.get('ews_auth_type', None)
version = m_config.get('ews_version', None)
config = Configuration(
retry_policy=FaultTolerance(max_wait=40),
credentials=credentials,
service_endpoint=service_endpoint,
auth_type=auth_type,
version=version)
return config
def create_account(self, failcount=0, config=None, autodiscover=None):
email, password = self.cred_manager.load()
credentials = Credentials(username=email, password=password)
# first try to load saved config from QSettings
keys = ('ews_url', 'ews_auth_type', 'ews_version')
m = self.cred_manager.load_multi(keys=keys)
# don't need to autodiscover if already have saved settings
if autodiscover is None:
autodiscover = True if m.get('ews_url', None) is None else False
if config is None:
config = self.create_config(credentials=credentials, m_config=m)
try:
account = Account(
primary_smtp_address=email,
config=config,
autodiscover=autodiscover,
access_type=DELEGATE) # important to be DELEGATE, otherwise it tries 'Impersonate', which doesn't work
self.save_account_settings(account=account)
except:
log.warning(f'Failed creating account: {failcount}')
failcount += 1
if failcount == 1:
# on first fail, need to retry with manual credentials
config = self.create_config(credentials=credentials) # use hardcoded
account = self.create_account(failcount=failcount, config=config, autodiscover=False)
elif failcount <= 2:
account = self.create_account(failcount=failcount)
else:
return None
return account
def save_account_settings(self, account):
if AZURE_WEB:
return
m = dict(
ews_url=account.protocol.service_endpoint,
ews_auth_type=account.protocol.auth_type,
ews_version=account.version)
self.cred_manager.save_multi(vals=m)
@property
def fldr_root(self):
if self._fldr_root is None:
self._fldr_root = self.exchange.root / 'Top of Information Store'
return self._fldr_root
@property
def wo_folder(self):
if self._wo_folder is None:
self._wo_folder = self.fldr_root.glob('WO Request')
return self._wo_folder
def get_wo_from_email(self, unit, title):
tz = ex.EWSTimeZone.localzone()
maxdate = dt.now() + delta(days=-15)
messages = self.wo_folder \
.filter(
datetime_received__range=(
tz.localize(ex.EWSDateTime.from_datetime(maxdate)),
tz.localize(ex.EWSDateTime.now()))) \
.filter(subject__icontains=title) \
.filter(subject__icontains=unit)
expr = re.compile('WO[0-9]{7}', re.IGNORECASE)
for msg in messages.all():
match = re.search(expr, str(msg))
if match is not None:
wo = match.group(0)
return wo
def parse_attachment(attachment, d=None, header=2):
data = fl.from_bytes(attachment.content)
df = pd.read_csv(data, header=header)
df['DateEmail'] = d # only used for the dt exclusions email, which doesn't have a date field
return df
def combine_email_data(folder, maxdate, subject=None, header=2):
a = ExchangeAccount().exchange
fldr = a.root / 'Top of Information Store' / folder
tz = ex.EWSTimeZone.localzone()
# filter downtime folder to emails with date_received 2 days greater than max shift date in db
fltr = fldr.filter(
datetime_received__range=(
tz.localize(ex.EWSDateTime.from_datetime(maxdate)),
tz.localize(ex.EWSDateTime.now())))
# useful if single folder contains multiple types of emails
if subject is not None:
fltr = fltr.filter(subject__contains=subject)
try:
df = pd.concat([parse_attachment(
item.attachments[0],
header=header,
d=item.datetime_received.date() + delta(days=-1)) for item in fltr])
except:
log.warning('No emails found.')
df = pd.DataFrame()
return df
from flask import render_template,request, url_for, jsonify, redirect, Response, send_from_directory
from app import app
from app import APP_STATIC
from app import APP_ROOT
import json
import numpy as np
import pandas as pd
import os
import re
# from kmapper import KeplerMapper, Cover
from .kmapper import KeplerMapper, Cover
from sklearn import cluster
import networkx as nx
import sklearn
# from sklearn.linear_model import LinearRegression
try:
import statsmodels.api as sm
except:
print('No statsmodel found')
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KernelDensity
from scipy.spatial import distance
from sklearn.cluster import KMeans
import importlib
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.manifold import TSNE
@app.route('/')
@app.route('/MapperInteractive_new')
def index():
return render_template('index.html')
@app.route('/data_process', methods=['POST','GET'])
def process_text_data():
'''
Check for:
1. Missing value
2. Non-numerical elements in numerical cols
3. If cols are non-numerical, check if cols are categorical
'''
text_data = request.get_data().decode('utf-8').splitlines()
cols = text_data[0].split(',')
mat = [n.split(',') for n in text_data] # csv: if an element is empty, it will be "".
newdf1 = np.array(mat)[1:]
rows2delete = np.array([])
cols2delete = []
# ### Delete missing values ###
for i in range(len(cols)):
col = newdf1[:,i]
if np.sum(col == "") >= 0.2*len(newdf1): # if more than 20% elements in this column are empty, delete the whole column
cols2delete.append(i)
else:
rows2delete = np.concatenate((rows2delete, np.where(col=="")[0]))
rows2delete = np.unique(rows2delete).astype("int")
newdf2 = np.delete(np.delete(newdf1, cols2delete, axis=1), rows2delete, axis=0)
cols = [cols[i] for i in range(len(cols)) if i not in cols2delete]
### check if numerical cols ###
cols_numerical_idx = []
cols_categorical_idx = []
cols_others_idx = []
rows2delete = np.array([])
r1 = re.compile(r'^-?\d+(?:\.\d+)?$')
r2 = re.compile(r'[+\-]?[^A-Za-z]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+)') # scientific notation
vmatch = np.vectorize(lambda x:bool(r1.match(x) or r2.match(x)))
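# Illustrative matches for the two patterns (not exhaustive):
# r1 accepts plain integers and decimals such as "42" or "-3.14";
# r2 additionally accepts scientific notation such as "1.5e-3" or "+2E+10".
# vmatch applies the test element-wise, e.g. vmatch(np.array(["1.0", "abc"])) -> [True, False]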
for i in range(len(cols)):
col = newdf2[:,i]
col_match = vmatch(col)
if np.sum(col_match) >= 0.8*len(newdf1): # if at least 80% of elements can be converted to float, keep the col and delete the rows that cannot be converted
cols_numerical_idx.append(i)
rows2delete = np.concatenate((rows2delete, np.where(col_match==False)[0]))
else:
### check if categorical cols###
if len(np.unique(col)) <= 200: # if no more than 200 distinct values: treat as categorical
cols_categorical_idx.append(i)
else:
cols_others_idx.append(i)
newdf3 = newdf2[:, cols_numerical_idx+cols_categorical_idx+cols_others_idx]
rows2delete = rows2delete.astype(int)
newdf3 = np.delete(newdf3, rows2delete, axis=0)
newdf3_cols = [cols[idx] for idx in cols_numerical_idx+cols_categorical_idx+cols_others_idx]
newdf3 = pd.DataFrame(newdf3)
newdf3.columns = newdf3_cols
# write the data frame
newdf3.to_csv(APP_STATIC+"/uploads/processed_data.csv", index=False)
# write the cols info
cols_numerical = [cols[idx] for idx in cols_numerical_idx]
cols_categorical = [cols[idx] for idx in cols_categorical_idx]
cols_others = [cols[idx] for idx in cols_others_idx]
cols_dict = {'cols_numerical':cols_numerical, 'cols_categorical':cols_categorical, 'cols_others':cols_others}
with open(APP_STATIC+"/uploads/cols_info.json", 'w') as f:
f.write(json.dumps(cols_dict, indent=4))
return jsonify(columns=cols_numerical, categorical_columns=cols_categorical, other_columns=cols_others)
# @app.route('/data_process', methods=['POST','GET'])
# def load_data():
# filename = request.get_data().decode('utf-8').splitlines()[0]
# print(filename)
# df = pd.read_csv(APP_STATIC+"/uploads/"+filename)
# cols = list(df.columns)
# df_0 = df.iloc[0,:]
# cols_numerical_idx = []
# cols_categorical_idx = []
# cols_others_idx = []
# rows2delete = np.array([])
# for i in range(len(cols)):
# c = df_0.iloc[i]
# try:
# float(c)
# cols_numerical_idx.append(i)
# except ValueError:
# cols_categorical_idx.append(i)
# # if isinstance(c,int) or isinstance(c,float):
# # cols_numerical_idx.append(i)
# # else:
# # cols_categorical_idx.append(i)
# df.to_csv(APP_STATIC+"/uploads/processed_data.csv", index=False)
# cols_numerical = [cols[idx] for idx in cols_numerical_idx]
# cols_categorical = [cols[idx] for idx in cols_categorical_idx]
# cols_others = [cols[idx] for idx in cols_others_idx]
# cols_dict = {'cols_numerical':cols_numerical, 'cols_categorical':cols_categorical, 'cols_others':cols_others}
# print(cols_dict)
# with open(APP_STATIC+"/uploads/cols_info.json", 'w') as f:
# f.write(json.dumps(cols_dict, indent=4))
# return jsonify(columns=cols_numerical, categorical_columns=cols_categorical, other_columns=cols_others)
@app.route('/mapper_data_process', methods=['POST','GET'])
def load_mapper_data():
filename = request.get_data().decode('utf-8').splitlines()[0]
with open(APP_STATIC+"/uploads/"+filename) as f:
mapper_graph = json.load(f)
mapper_graph["links"] = mapper_graph["edges"]
del mapper_graph["edges"]
mapper_graph_new = _parse_result(mapper_graph)
connected_components = compute_cc(mapper_graph_new)
return jsonify(mapper=mapper_graph_new, connected_components=connected_components)
@app.route('/mapper_loader', methods=['POST','GET'])
def get_graph():
mapper_data = request.form.get('data')
mapper_data = json.loads(mapper_data)
selected_cols = mapper_data['cols']
all_cols = mapper_data['all_cols'] # all numerical cols
categorical_cols = mapper_data['categorical_cols']
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
data_categorical = data[categorical_cols]
data = data[all_cols]
# data = data[selected_cols].astype("float")
config = mapper_data["config"]
norm_type = config["norm_type"]
clustering_alg = config["clustering_alg"]
clustering_alg_params = config["clustering_alg_params"]
# eps = config["eps"]
# min_samples = config["min_samples"]
#### TODO: update filter_parameters ####
filter_parameters = config
# filter functions
filter_function = config["filter"]
if len(filter_function) == 1:
interval = int(config["interval1"])
overlap = float(config["overlap1"]) / 100
elif len(filter_function) == 2:
interval = [int(config["interval1"]), int(config["interval2"])]
overlap = [float(config["overlap1"])/100, float(config["overlap2"])/100]
print(interval, overlap)
# TODO: fix normalization (only point cloud column needs to be modified?)
# normalization
if norm_type == "none":
pass
elif norm_type == "0-1": # axis=0, min-max norm for each column
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
else:
data = sklearn.preprocessing.normalize(data, norm=norm_type, axis=0, copy=False, return_norm=False)
data = pd.DataFrame(data, columns = all_cols)
mapper_result = run_mapper(data, selected_cols, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters)
if len(categorical_cols) > 0:
for node in mapper_result['nodes']:
print("node", node['id'])
vertices = node['vertices']
data_categorical_i = data_categorical.iloc[vertices]
node['categorical_cols_summary'] = {}
for col in categorical_cols:
node['categorical_cols_summary'][col] = data_categorical_i[col].value_counts().to_dict()
connected_components = compute_cc(mapper_result)
return jsonify(mapper=mapper_result, connected_components=connected_components)
@app.route('/linear_regression', methods=['POST','GET'])
def linear_regression():
json_data = json.loads(request.form.get('data'))
selected_nodes = json_data['nodes']
y_name = json_data['dep_var']
X_names = json_data['indep_vars']
print(y_name, X_names)
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
y = data.loc[:,y_name]
X = data.loc[:,X_names]
X2 = sm.add_constant(X)
reg = sm.OLS(y, X2)
print(y,X2)
result = reg.fit()
ypred = result.predict(X2)
influence = result.get_influence()
std_residuals = influence.resid_studentized_internal
conf_int = np.array(result.conf_int())
conf_int_new = []
for i in range(conf_int.shape[0]):
conf_int_new.append(list(conf_int[i,:]))
print(result.summary())
return jsonify(params=list(result.params), pvalues=list(result.pvalues), conf_int=conf_int_new, stderr=list(result.bse), rsquared=result.rsquared, fvalue=result.f_pvalue, rsquared_adj=result.rsquared_adj, y_actual=list(y), y_predicted=list(ypred), std_residuals=list(std_residuals))
@app.route('/pca', methods=['POST','GET'])
def pca():
'''
Dimension reduction using PCA
n_components = 2
'''
selected_nodes = json.loads(request.form.get('data'))['nodes']
color_col = json.loads(request.form.get('data'))['color_col']
print(selected_nodes)
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
with open(APP_STATIC+"/uploads/cols_info.json") as f:
cols_dict = json.load(f)
cols = json.loads(request.form.get('data'))['cols']
print(cols)
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
pca = PCA(n_components=2)
scaler = MinMaxScaler()
cols = cols_dict['cols_numerical']
data_new = scaler.fit_transform(data.loc[:,cols])
data_new = pca.fit_transform(data_new)
data_new = pd.DataFrame(data_new)
data_new.columns = ['pc1', 'pc2']
print(data.shape)
print(data_new)
# clustering
if len(selected_nodes)>0:
data_new['kmeans_cluster'] = KMeans(n_clusters=min(len(selected_nodes), 6), random_state=0).fit(data_new).labels_
else:
# data_new['kmeans_cluster'] = KMeans(n_clusters=10, random_state=0).fit(data_new).labels_
data_new['kmeans_cluster'] = KMeans(n_clusters=6, random_state=0).fit(data_new).labels_
if color_col in cols_dict['cols_categorical'] or color_col in cols_dict['cols_numerical']:
data_new['color_col'] = data[color_col]
data_new = data_new.to_json(orient='records')
return jsonify(pca=data_new)
@app.route('/update_cluster_details', methods=['POST','GET'])
def update_cluster_details():
label_column = request.get_data().decode('utf-8')
df = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
with open(APP_STATIC+"/uploads/cols_info.json") as f:
cols_dict = json.load(f)
labels = df[label_column]
if label_column in cols_dict['cols_numerical']:
labels = np.round(labels,2)
labels = list(labels)
return jsonify(labels=labels)
def run_mapper(data_array, col_names, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters=None):
"""This function is called when the form is submitted. It triggers construction of Mapper.
Each parameter of this function is defined in the configuration.
To customize the Mapper construction, you can inherit from :code:`KeplerMapperConfig` and customize this function.
Parameters
-------------
interval: int
Number of intervals
overlap: float
Percentage of overlap. This value is divided by 100 upstream to produce a proportion.
clustering_alg: str
Clustering algorithm applied within each cover element ("DBSCAN", "Agglomerative Clustering" or "Mean Shift").
clustering_alg_params: dict
Parameters for the chosen clustering algorithm, e.g. :code:`eps` and :code:`min_samples` for DBSCAN.
filter_function: str
Projection for constructing the lens for Kepler Mapper.
"""
# data_array = np.array(data_array)
km_result, lens = _call_kmapper(data_array, col_names,
interval,
overlap,
clustering_alg,
clustering_alg_params,
filter_function,
filter_parameters
)
print(lens)
return _parse_result(km_result, filter_function, lens, data_array)
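# Minimal usage sketch (hypothetical column names and parameter values, for illustration only):
# df = pd.read_csv(APP_STATIC + "/uploads/processed_data.csv")
# graph = run_mapper(df, ["col_a", "col_b"], interval=10, overlap=0.3,
# clustering_alg="DBSCAN",
# clustering_alg_params={"eps": 0.5, "min_samples": 5},
# filter_function=["mean"])
# # graph is a dict with "nodes" and "links" ready to be serialized for the front end.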
def _call_kmapper(data, col_names, interval, overlap, clustering_alg, clustering_alg_params, filter_function, filter_parameters=None):
print(filter_parameters)
mapper = KeplerMapper()
if len(col_names) == 1:
data_new = np.array(data[col_names[0]]).reshape(-1,1)
else:
data_new = np.array(data[col_names])
if len(filter_function) == 1:
f = filter_function[0]
if f in data.columns:
lens = data[f]
else:
lens = compute_lens(f, data_new, mapper, filter_parameters)
elif len(filter_function) == 2:
lens = []
for f in filter_function:
if f in data.columns:
lens_f = np.array(data[f]).reshape(-1,1)
else:
lens_f = compute_lens(f, data_new, mapper, filter_parameters)
lens.append(lens_f)
lens = np.concatenate((lens[0], lens[1]), axis=1)
# clusterer = sklearn.cluster.DBSCAN(eps=eps, min_samples=min_samples, metric='euclidean', n_jobs=8)
print(data_new.shape)
print(np.max(np.max(data_new)))
print(np.mean(np.mean(data_new)))
if clustering_alg == "DBSCAN":
graph = mapper.map_parallel(lens, data_new, clusterer=cluster.DBSCAN(eps=float(clustering_alg_params["eps"]), min_samples=float(clustering_alg_params["min_samples"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))
elif clustering_alg == "Agglomerative Clustering":
graph = mapper.map_parallel(lens, data_new, clusterer=cluster.AgglomerativeClustering(n_clusters=None, linkage=clustering_alg_params["linkage"], distance_threshold=float(clustering_alg_params["dist"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))
# graph = mapper.map_parallel(lens, data_new, clusterer=cluster.AgglomerativeClustering( linkage=clustering_alg_params["linkage"]), cover=Cover(n_cubes=interval, perc_overlap=overlap))
elif clustering_alg == "Mean Shift":
graph = mapper.map_parallel(lens, data_new, clusterer=cluster.MeanShift(bandwidth=float(clustering_alg_params["bandwidth"])), cover=Cover(n_cubes=interval, perc_overlap=overlap))
# graph = mapper.map_parallel(lens, data_new, clusterer=cluster.MeanShift(bandwidth=1), cover=Cover(n_cubes=interval, perc_overlap=overlap))
print(len(graph['nodes'].keys()))
# graph = mapper.map(lens, data_new, clusterer=cluster.DBSCAN(eps=eps, min_samples=min_samples), cover=Cover(n_cubes=interval, perc_overlap=overlap))
# return graph
return (graph, lens)
def compute_lens(f, data, mapper, filter_parameters=None):
data_array = np.array(data)
if f in ["sum", "mean", "median", "max", "min", "std", "l2norm"]:
lens = mapper.fit_transform(data_array, projection=f).reshape(-1,1)
elif f == "Density":
density_kernel = filter_parameters['density_kernel']
density_bandwidth = filter_parameters['density_bandwidth']
print("density", density_kernel, density_bandwidth)
kde = KernelDensity(kernel=density_kernel, bandwidth=density_bandwidth).fit(data_array)
lens = kde.score_samples(data_array).reshape(-1,1)
scaler = MinMaxScaler()
lens = scaler.fit_transform(lens)
elif f == "Eccentricity":
p = filter_parameters['eccent_p']
distance_matrix = filter_parameters['eccent_dist']
print("eccent", p, distance_matrix)
pdist = distance.squareform(distance.pdist(data_array, metric=distance_matrix))
lens = np.array([(np.sum(pdist**p, axis=1)/len(data_array))**(1/p)]).reshape(-1,1)
elif f == "PC1":
pca = PCA(n_components=min(2, data_array.shape[1]))
lens = pca.fit_transform(data_array)[:,0].reshape(-1,1)
elif f == "PC2":
if data_array.shape[1] > 1:
pca = PCA(n_components=2)
lens = pca.fit_transform(data_array)[:,1].reshape(-1,1)
# else:
# lens = np.array(data[f]).reshape(-1,1)
return lens
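# Illustrative example (hypothetical 2-D point cloud):
# pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
# compute_lens("l2norm", pts, KeplerMapper()) returns a (3, 1) array of row norms, while
# compute_lens("Eccentricity", pts, KeplerMapper(), {"eccent_p": 2, "eccent_dist": "euclidean"})
# scores how far each point sits from the rest of the cloud.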
def _parse_result(graph, filter_function, lens, data_array=[]):
if len(data_array)>0:
col_names = data_array.columns
data_array = np.array(data_array)
data = {"nodes": [], "links": []}
# nodes
node_keys = graph['nodes'].keys()
name2id = {}
i = 1
nodes_detail = {}
for key in node_keys:
name2id[key] = i
cluster = graph['nodes'][key]
nodes_detail[i] = cluster
lens_values = {}
if len(filter_function) == 1:
lens_data = lens[cluster]
lens_avg = np.mean(lens_data)
lens_values[filter_function[0]] = lens_avg
elif len(filter_function) == 2:
for j in range(len(filter_function)):
lens_j = lens[:,j]
lens_data = lens_j[cluster]
lens_avg = np.mean(lens_data)
lens_values[filter_function[j]] = lens_avg
if len(data_array)>0:
cluster_data = data_array[cluster]
cluster_avg = np.mean(cluster_data, axis=0)
cluster_avg_dict = {}
for j in range(len(col_names)):
cluster_avg_dict[col_names[j]] = cluster_avg[j]
data['nodes'].append({
"id": str(i),
"size": len(graph['nodes'][key]),
"avgs": cluster_avg_dict,
"lens_avg": lens_values,
"vertices": cluster
})
else:
data['nodes'].append({
"id": str(i),
"size": len(graph['nodes'][key]),
"lens_avg": lens_values,
"vertices": cluster
})
i += 1
with open(APP_STATIC+"/uploads/nodes_detail.json","w") as f:
json.dump(nodes_detail, f)
# links
links = set()
for link_from in graph['links'].keys():
for link_to in graph['links'][link_from]:
from_id = name2id[link_from]
to_id = name2id[link_to]
left_id = min(from_id, to_id)
right_id = max(from_id, to_id)
links.add((left_id, right_id))
for link in links:
data['links'].append({"source": link[0], "target": link[1]})
return data
def compute_cc(graph):
'''
Compute connected components for the mapper graph
'''
G = nx.Graph()
for node in graph['nodes']:
nodeId = int(node['id'])-1
G.add_node(nodeId)
for edge in graph['links']:
sourceId = int(edge['source'])-1
targetId = int(edge['target'])-1
G.add_edge(sourceId, targetId)
cc = nx.connected_components(G)
cc_list = []
for c in cc:
cc_list.append(list(c))
return cc_list
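# Illustrative example (hypothetical mapper output): two linked nodes plus one isolated
# node yield two connected components.
# g = {"nodes": [{"id": "1"}, {"id": "2"}, {"id": "3"}], "links": [{"source": 1, "target": 2}]}
# compute_cc(g) -> [[0, 1], [2]]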
def get_selected_data(selected_nodes):
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
with open(APP_STATIC+"/uploads/cols_info.json") as f:
cols_dict = json.load(f)
cols = cols_dict['cols_numerical']
print(cols)
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
return data, cols
@app.route('/module_extension', methods=['POST','GET'])
def module_extension():
module_info = ""
with open(APP_STATIC+"/uploads/new_modules.json") as f:
module_info = json.load(f)
return module_info
@app.route('/module_computing', methods=['POST','GET'])
def module_computing():
json_data = json.loads(request.form.get('data'))
selected_nodes = json_data['nodes']
data, cols = get_selected_data(selected_nodes)
module_info = json_data['module_info']
data_new = call_module_function(data, cols, module_info)
# data_new['kmeans_cluster'] = KMeans(n_clusters=4, random_state=0).fit(data_new).labels_
# data_new = data_new.to_json(orient='records')
# return jsonify(module_result=data_new)
return data_new
# kNN graph
# from pynndescent import NNDescent
# df = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
# activations = np.array(df['GrowthRate']).reshape(-1,1)
# k=5
# index = NNDescent(activations, n_neighbors=15, metric='euclidean')
# out = index.query(activations, k=k)
# dist = out[1]
# s_dist=np.sort(dist, axis=0)
# s_dist = list(s_dist[:,k-1].astype("str"))
# print(s_dist)
# return jsonify(s_dist=s_dist)
def call_module_function(data, cols, module_info):
mod_name, func_name = module_info['function-name'].rsplit('.',1)
mod = importlib.import_module(mod_name)
method_to_call = getattr(mod, func_name)
if module_info['module-type'] == "unsupervised_learning":
result = method_to_call(**module_info['function-parameters'])
data_new = result.fit_transform(data.loc[:,cols])
data_new = pd.DataFrame(data_new)
data_new_cols = []
for i in range(data_new.shape[1]):
data_new_cols.append("col"+str(i+1))
data_new.columns = data_new_cols
data_new['kmeans_cluster'] = KMeans(n_clusters=4, random_state=0).fit(data_new).labels_
data_new = data_new.to_json(orient='records')
data_new = jsonify(module_result=data_new)
elif module_info['module-type'] == "supervised_learning":
y = data.loc[:,module_info['input-variables']['dependent']]
X = data.loc[:,module_info['input-variables']['independent']]
X2 = sm.add_constant(X)
reg = method_to_call(np.asarray(y), np.asarray(X2))
result = reg.fit()
conf_int = np.array(result.conf_int())
conf_int_new = []
for i in range(conf_int.shape[0]):
conf_int_new.append(list(conf_int[i,:]))
print(result.summary())
# # cross validation
# from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import cross_validate
# clf = LogisticRegression(random_state=0).fit(X, y)
# scores = cross_validate(clf, X, y)
# test_scores = scores['test_score']
# data_new = jsonify(params=list(result.params), pvalues=list(result.pvalues), conf_int=conf_int_new, stderr=list(result.bse), llr_pvalue=result.llr_pvalue, test_scores=list(test_scores), y_name=module_info['input-variables']['dependent'], X_names=module_info['input-variables']['independent'])
data_new = jsonify(params=list(result.params), pvalues=list(result.pvalues), conf_int=conf_int_new, stderr=list(result.bse))
return data_new
@app.route('/export_graph', methods=['POST','GET'])
def export_graph():
jsdata = request.form.get('javascript_data')
jsdata1 = json.loads(jsdata)
if jsdata1["filename"] == "":
filename = os.path.join(APP_STATIC, "downloads/export.json")
else: filename = os.path.join(APP_STATIC, "downloads/", jsdata1["filename"] + ".json")
with open(filename,"w") as outfile:
json.dump(jsdata1,outfile)
outfile.close()
return jsdata
@app.route('/export_clusters', methods=['POST','GET'])
def export_clusters():
jsdata = request.form.get('javascript_data')
jsdata1 = json.loads(jsdata)
# if jsdata1["filename"] == "":
# filename = path.join(APP_STATIC,"downloads/export.json")
# else: filename = path.join(APP_STATIC,"downloads/",jsdata1["filename"]+".json")
with open(filename,"w") as outfile:
json.dump(jsdata1,outfile)
outfile.close()
return jsdata
@app.route('/feature_selection', methods=['POST','GET'])
def feature_selection():
jsdata = json.loads(request.form.get('data'))
print(jsdata)
selected_nodes = jsdata['nodes']
y_name = jsdata['y']
X_names = jsdata['X']
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
y = data.loc[:,y_name]
X = data.loc[:,X_names]
lsvc = LinearSVC(C=1, dual=False).fit(X, y)
model = SelectFromModel(lsvc, prefit=True)
feature_idx = model.get_support()
feature_name= list(X.columns[feature_idx])
svc_score = lsvc.score(X,y)
return jsonify(feature_names=feature_name, svc_score=svc_score)
@app.route('/module_scatter_plot', methods=['POST','GET'])
def module_scatter_plot():
jsdata = json.loads(request.form.get('data'))
selected_nodes = jsdata['nodes']
x_name = jsdata['x_name']
y_name = jsdata['y_name']
color_name = jsdata['color_name']
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
x_col = data.loc[:,x_name]
y_col = data.loc[:,y_name]
color_col = data.loc[:, color_name]
return jsonify(x_name=x_name, x_col=list(x_col), y_name=y_name, y_col=list(y_col), color_name=color_name, color_col=list(color_col))
@app.route('/module_tsne', methods=['POST','GET'])
def module_tsne():
jsdata = json.loads(request.form.get('data'))
selected_nodes = jsdata['nodes']
color_col = jsdata['color_col']
data = pd.read_csv(APP_STATIC+"/uploads/processed_data.csv")
with open(APP_STATIC+"/uploads/cols_info.json") as f:
cols_dict = json.load(f)
cols = jsdata['cols']
print(cols)
with open(APP_STATIC+"/uploads/nodes_detail.json") as f:
nodes_detail = json.load(f)
if len(selected_nodes) > 0:
selected_rows = []
for node in selected_nodes:
selected_rows += nodes_detail[node]
selected_rows = list(set(selected_rows))
data = data.iloc[selected_rows, :]
data.index = range(len(data))
module_info = jsdata['module_info']
tsne = TSNE(n_components=2)
scaler = MinMaxScaler()
data_new = scaler.fit_transform(data.loc[:,cols])
data_new = tsne.fit_transform(data_new)
data_new = pd.DataFrame(data_new)
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_instruments_two_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr1 = 'ESZ15'
instr2 = 'CLZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.define_generic("CL", "CAD", 0.1, 1, 2.5)
blt.map_instrument("CL", "CLZ15")
blt._trade(ts, instr1, qty, price)
blt._trade(ts, instr2, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty, qty], index=['CLZ15', 'ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_trades_one_future_base_to_base(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price,
"USD", 1.0]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_one_future_with_mid_price_fx(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "CAD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price, "CAD",
1 / 1.3125]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_two_futures(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price1 = 2081
mid_price1 = 2080.75
price2 = 2083
mid_price2 = 2082.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.map_instrument("ES", "ESF16")
blt._trade(ts, instr, qty, price1, mid_price1)
blt._trade(ts, instr, qty, price2, mid_price2)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
data = [[instr, 1, 50, price1, mid_price1, "USD", 1.0],
[instr, 1, 50, price2, mid_price2, "USD", 1.0]]
exp_trades = pd.DataFrame(data, index=[ts, ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_create_unknown_event(self):
blt = self.make_blotter()
ts = pd.Timestamp('2015-08-03T00:00:00')
def create_unknown():
return blt.create_events(ts, "NotAllowed")
self.assertRaises(NotImplementedError, create_unknown)
def test_dispatch_unknown_event(self):
blt = self.make_blotter()
ev = blotter._Event("NotAnEvent",
{"timestamp": pd.Timestamp('2015-01-01')})
def dispatch_unknown():
blt.dispatch_events([ev])
self.assertRaises(NotImplementedError, dispatch_unknown)
def test_create_interest_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_interest_event_no_rate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
# No ZAR data
blt._holdings.update_cash(ts, "ZAR", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_interest():
return blt.create_events(ts, "INTEREST")
self.assertRaises(KeyError, get_interest)
def test_create_interest_weekend_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-06T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-07T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 3 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 3 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, 'SXMZ15', qty, price)
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
sxm_fp = os.path.join(self.prices, 'SXMZ15.csv')
sxm = pd.read_csv(sxm_fp, index_col=0, parse_dates=True)
usdcad_fp = os.path.join(self.prices, 'USDCAD.csv')
usdcad = pd.read_csv(usdcad_fp, index_col=0, parse_dates=True)
es_notional = es.loc[ts].values * qty * 0.05
sxm_notional = sxm.loc[ts].values * qty * 0.1 / usdcad.loc[ts].values
notnl = float(es_notional + sxm_notional)
quantity = notnl * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_short_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = -1
price = 0
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
es_notional = float(es.loc[ts].values * np.abs(qty) * 0.05)
quantity = es_notional * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, 'SXMZ15', qty, price)
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "PNL")
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
sxm_fp = os.path.join(self.prices, 'SXMZ15.csv')
sxm = pd.read_csv(sxm_fp, index_col=0, parse_dates=True)
prices = pd.concat([es.loc[ts], sxm.loc[ts]], axis=0)
ev_exp = [blotter._Event("PNL", {"timestamp": ts, "prices": prices})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_event_no_price(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
# No price info for BBBZ15
blt.define_generic("BBB", "CAD", 0.1, 1, 2.5)
blt.map_instrument("BBB", "BBBZ15")
blt._trade(ts, 'BBBZ15', qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
def no_price():
return blt.create_events(ts, "PNL")
self.assertRaises(KeyError, no_price)
def test_closed_position_pnl_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
blt._trade(ts, "ESZ15", -qty, price)
ts = pd.Timestamp('2015-08-06T00:00:00')
ev = blt.create_events(ts, "PNL")
ev_exp = [blotter._Event("PNL", {"timestamp": ts,
"prices": pd.Series([])})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_sweep_event_closed_pnl(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T12:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 50.50, 1, 1, 0, "CAD")
ts = pd.Timestamp('2015-08-03T14:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 51.50, -1, 1, 0, "CAD")
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "PNL_SWEEP")
evs_exp = [blotter._Event("PNL_SWEEP", {"timestamp": ts, "ccy1": "CAD",
"quantity1": -1.00,
"ccy2": "USD",
"quantity2": 1 / 1.3125})]
self.assertEventsEqual(evs, evs_exp)
def test_create_pnl_sweep_no_event_open_pnl_only(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
sweep_time=None,
accrual_time=pd.Timedelta("0h"),
eod_time=pd.Timedelta("0h"))
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T12:00:00')
pos = 1
blt.define_generic("SXM", "CAD", 0, 1, 0)
blt.map_instrument("SXM", "SXMZ15")
blt.trade(ts, 'SXMZ15', pos, price=800, ntc_price=800)
ts = pd.Timestamp('2015-08-04T00:00:00')
blt.automatic_events(ts)
evs = blt.create_events(ts, "PNL_SWEEP")
evs_exp = []
self.assertEventsEqual(evs, evs_exp)
def test_create_pnl_sweep_no_event_base(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T12:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 50.50, 1, 1, 0, "USD")
ts = pd.Timestamp('2015-08-03T14:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 51.50, -1, 1, 0, "USD")
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "PNL_SWEEP")
evs_exp = []
self.assertEqual(evs, evs_exp)
def test_create_pnl_sweep_no_event_pnl_already_swept(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T12:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 50.50, 1, 1, 0, "CAD")
ts = pd.Timestamp('2015-08-03T14:00:00')
blt._holdings.record_trade(ts, 'CLZ15', 51.50, -1, 1, 0, "CAD")
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "PNL_SWEEP")
blt.dispatch_events(evs)
ts = pd.Timestamp('2015-08-05T00:00:00')
evs = blt.create_events(ts, "PNL_SWEEP")
self.assertEqual(evs, [])
def test_create_trade_fx_AUDUSD(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
blt.define_generic("AUDUSD", "USD", 0, 1, 0, True)
blt.map_instrument("AUDUSD", "AUDUSD")
ts = pd.Timestamp('2015-08-03T12:00:00')
evs = blt._create_trade(ts, "AUDUSD", quantity=1000, price=0.80)
ev_exp = [blotter._Event("TRADE", {"timestamp": ts,
"instrument": "AUDUSD",
"ccy": "USD", "price": 0.80,
"quantity": 1000, "multiplier": 1,
"commission": 0}),
blotter._Event("CASH", {"timestamp": ts, "ccy": "USD",
"quantity": -1000 * 0.80}),
blotter._Event("CASH", {"timestamp": ts, "ccy": "AUD",
"quantity": 1000})]
self.assertEventsEqual(evs, ev_exp)
def test_create_trade_fx_USDCAD(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
blt.define_generic("USDCAD", "CAD", 0, 1, 0, True)
blt.map_instrument("USDCAD", "USDCAD")
ts = pd.Timestamp('2015-08-03T12:00:00')
evs = blt._create_trade(ts, "USDCAD", quantity=1000, price=1.31)
ev_exp = [blotter._Event("TRADE", {"timestamp": ts,
"instrument": "USDCAD",
"ccy": "CAD", "price": 1.31,
"quantity": 1000, "multiplier": 1,
"commission": 0}),
blotter._Event("CASH", {"timestamp": ts, "ccy": "CAD",
"quantity": -1000 * 1.31}),
blotter._Event("CASH", {"timestamp": ts, "ccy": "USD",
"quantity": 1000})]
self.assertEventsEqual(evs, ev_exp)
def test_create_trade_future(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
blt.define_generic("ES", "USD", 0, 1, 0, False)
blt.map_instrument("ES", "ESZ15")
ts = pd.Timestamp('2015-08-03T12:00:00')
evs = blt._create_trade(ts, "ESZ15", quantity=1, price=1800)
ev_exp = [blotter._Event("TRADE", {"timestamp": ts,
"instrument": "ESZ15",
"ccy": "USD", "price": 1800,
"quantity": 1, "multiplier": 1,
"commission": 0})]
self.assertEventsEqual(evs, ev_exp)
def test_create_trade_0_quantity(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
blt.connect_market_data()
blt.define_generic("ES", "USD", 0, 1, 0, False)
blt.map_instrument("ES", "ESZ15")
ts = pd.Timestamp('2015-08-03T12:00:00')
evs = blt._create_trade(ts, "ESZ15", 0, 1800)
self.assertEqual(evs, [])
def test_create_read_log(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# test events can be properly read error free
blt.read_log(self.log)
evs = blt._create_log_events(self.log)
ts1 = pd.Timestamp('2016-12-01T10:00:00')
        ts2 = pd.Timestamp('2016-12-02T10:00:00')
#!/usr/bin/env python3
#
# main.py: main script for testing Persistent Weisfeiler--Lehman graph
# kernels.
import copy
import igraph as ig
import numpy as np
import pandas as pd
import argparse
import collections
import logging
from os.path import dirname, exists
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import graphkernels.kernels as gk
from tqdm import tqdm
from kernelgridsearchcv import KernelGridSearchCV
from features import FeatureSelector
from features import PersistentWeisfeilerLehman
from features import WeisfeilerLehmanSubtree
from utilities import read_labels
from sklearn.base import clone
def main(args, logger):
graphs = [ig.read(filename) for filename in args.FILES]
labels = read_labels(args.labels)
# Set the label to be uniform over all graphs in case no labels are
# available. This essentially changes our iteration to degree-based
# checks.
for graph in graphs:
if 'label' not in graph.vs.attributes():
graph.vs['label'] = [0] * len(graph.vs)
logger.info('Read {} graphs and {} labels'.format(len(graphs), len(labels)))
assert len(graphs) == len(labels)
# Calculate graph kernel
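    # The vertex histogram kernel compares graphs via histograms of their vertex
    # labels; CalculateVertexHistKernel returns the full pairwise (Gram) matrix
    # over the list of graphs.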
gram_matrix = gk.CalculateVertexHistKernel(graphs)
y = LabelEncoder().fit_transform(labels)
np.random.seed(42)
mean_accuracies = []
params = ['balanced']
cv_results = []
entry = {}
for param in params:
entry[param] = args.__dict__[param]
entry['dataset'] = dirname(args.FILES[0]).split('/')[1]
entry['baseline'] = 'vertex hist kernel'
for i in range(10):
# Contains accuracy scores for each cross validation step; the
# means of this list will be used later on.
accuracy_scores = []
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=i)
for n, indices in enumerate(cv.split(graphs, y)):
entry_fold = copy.copy(entry)
train_index = indices[0]
test_index = indices[1]
pipeline = Pipeline(
[
('clf', SVC(class_weight='balanced' if
args.balanced else None,
random_state=42, kernel='precomputed'))
],
)
grid_params = {
'clf__C': [1e1]
}
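            # With a precomputed kernel, the "training data" is the Gram matrix
            # restricted to training rows and columns (K[train, train]), while
            # the "test data" holds each test graph's similarity to every
            # training graph (K[test, train]).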
X_train, X_test = gram_matrix[train_index][:,train_index], gram_matrix[test_index][:,train_index]
y_train, y_test = y[train_index], y[test_index]
kgscv = KernelGridSearchCV(pipeline,
param_grid=grid_params, cv=cv,
random_state=42)
kgscv.fit(X_train, y_train)
p = kgscv._best_params
sc = kgscv._best_score
clf = kgscv._best_estimator
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = accuracy_score(y_test, y_pred)
accuracy_scores.append(acc)
for param, param_val in kgscv._best_params.items():
entry_fold[param] = param_val
entry[param] = ''
entry_fold['fold'] = n + 1
entry_fold['it'] = i
entry_fold['acc'] = acc * 100
entry_fold['std'] = 0.0
cv_results.append(entry_fold)
logger.info('Best classifier for this fold:{}'.format(kgscv._best_params))
mean_accuracies.append(np.mean(accuracy_scores))
logger.info(' - Mean 10-fold accuracy: {:2.2f} [running mean over all folds: {:2.2f}]'.format(mean_accuracies[-1] * 100, np.mean(mean_accuracies) * 100))
entry['fold'] = 'all'
entry['it'] = 'all'
entry['acc'] = np.mean(mean_accuracies) * 100
entry['std'] = np.std(mean_accuracies) * 100
cv_results.append(entry)
logger.info('Accuracy: {:2.2f} +- {:2.2f}'.format(np.mean(mean_accuracies) * 100, np.std(mean_accuracies) * 100))
if exists(args.result_file):
with open(args.result_file, 'a') as f:
pd.DataFrame(cv_results).to_csv(f, index=False, header=None)
else:
        pd.DataFrame(cv_results).to_csv(args.result_file, index=False)
import pandas as pd
import requests
import io
import numpy as np
import geoglows
#df = pd.read_csv('/Users/student/Dropbox/PhD/2020_Winter/Dissertation_v9/South_America/Colombia/IDEAM_Stations_v2.csv')
df = pd.read_csv('C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\Stations_Selected_Colombia_v3.csv')
IDs = df['Codigo'].tolist()
#COMIDs = df['COMID'].tolist()
COMIDs = df['new_COMID'].tolist()
Names = df['Nombre'].tolist()
Rivers = df['Corriente'].tolist()
# #data = pd.read_csv('/Users/student/Dropbox/PhD/2020 Winter/Dissertation_v9/South_America/Colombia/row_data/Excel_2021_06_03.csv')
# data = pd.read_csv('C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\row_data\\Excel_2021_06_03.csv')
# data.rename(columns={'Fecha': 'Datetime'}, inplace=True)
# data.set_index(['Datetime'], inplace=True, drop=True)
# data.index = pd.to_datetime(data.index)
#
# for id in IDs:
#
# print(id)
# station_data = data.loc[data['CodigoEstacion'] == id]
# station_data = station_data.drop(['CodigoEstacion', 'NombreEstacion', 'Latitud', 'Longitud', 'Altitud'], axis=1)
# station_data.rename(columns={'Valor': 'Streamflow (m3/s)'}, inplace=True)
#
# index = pd.date_range(station_data.index[0], station_data.index[len(station_data.index) - 1], freq='D')
# data_nan = [np.nan] * len(index)
# pairs = [list(a) for a in zip(index, data_nan)]
# df2 = pd.DataFrame(pairs, columns=['Datetime', 'Values'])
# df2.set_index(['Datetime'], inplace=True, drop=True)
#
# result = pd.concat([df2, station_data], axis=1, sort=False)
# result = result.drop(['Values'], axis=1)
#
# #result.to_csv("/Users/student/Github/Bias_Correction/Colombia/Updated/{0}.csv".format(id))
# result.to_csv("C:\\Users\\jsanch3z\\Dropbox\\PhD\\2020_Winter\\Dissertation_v9\\South_America\\Colombia\\Forecast\\Observed_Data\\Streamflow\\{0}.csv".format(id))
#
# print('Terminado con los observados')
for comid in COMIDs:
print(comid)
url = 'https://geoglows.ecmwf.int/api/HistoricSimulation/?reach_id={0}&return_format=csv'.format(comid)
s = requests.get(url, verify=False).content
simulated_df = pd.read_csv(io.StringIO(s.decode('utf-8')), index_col=0)
#simulated_df = geoglows.streamflow.historic_simulation(comid, forcing='era_5', return_format='csv')
simulated_df[simulated_df < 0] = 0
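    # Round-trip through a "%Y-%m-%d" string to drop the time component, leaving
    # a purely daily DatetimeIndex.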
simulated_df.index = pd.to_datetime(simulated_df.index)
simulated_df.index = simulated_df.index.to_series().dt.strftime("%Y-%m-%d")
    simulated_df.index = pd.to_datetime(simulated_df.index)
#!/usr/bin/python3
import sys
sys.path.insert(0, "/home/eric/ramukcire/estimating_cost_of_dc_services/syscost/")
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import re
from collections import Counter
import itertools
import warnings
from termcolor import colored
import streamlit as st
from subprocess import check_output
import traffic.traffic as traffic
# from traffic.traffic import traffic
from datetime import datetime as dt
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
print(colored('Imported Modules\n', 'yellow'))
print(colored('Running from '+str((os.getcwd())),'green'))
#print(colored('Other directories at this level are '+ str(os.listdir()),'red'))
st.title('Total Cost of Ownership Model (Hardy)')
st.subheader('<NAME>, Doctor of Design')
'''This script will run the Hardy model. For now it will not interact \
with the model directly, but will be able to consume the outputs \
from the Perl program, parse them, and pass them to the SysCost EEIO inputs. \
'''
class costet(object):
def __init__(self, input_dc, input_r, streamlit=True, model_file=None):
'''Args: Runs the specified parameter TCO model.
input_dc: "input_example/dc.params" (Data-Center Parameters)
input_r: "input_example/r.params" (Resource Parameters)
streamlit = boolean for using streamlit
model_file = file name for the model output'''
self.input_dc = input_dc
self.input_r = input_r
self.model = check_output(["perl", "./cost-et.pl", input_dc, input_r], shell = False)
self.model = self.model.decode("utf-8")
self.streamlit = streamlit
self.model_file = model_file
def view_raw_output(self, save=None):
if self.streamlit is True:
st.header('Model run for ' +self.input_dc+' with '+self.input_r)
st.subheader('Output from Cost-ET Model run')
st.text(self.model)
if save is not None:
f = open(self.model_file, "w+")
f.write(str(self.model))
f.close()
print(colored('This is the output from the Cost-ET model: ' + self.model, 'yellow'))
def view_script(self, script):
'''Args: script: "cost-et.pl" '''
f = open(script, "r")
f = f.read()
print(colored('Print this :'+ f, 'magenta'))
if self.streamlit is True:
st.subheader('Print out of '+script)
st.code(f, language='perl')
def get_dc_params(self):
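        # The Cost-ET output is a plain-text report, so read_csv is used here as a
        # crude line reader; the hard-coded row slices assume a fixed report layout.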
_df = pd.read_csv(self.model_file)[2:24].reset_index(drop=True)
_df.columns = ['DC_parameters']
_df[['DC Param','Value']] = _df['DC_parameters'].str.split("=",expand=True)
_df = _df[['DC Param','Value']]
if self.streamlit is True:
st.subheader('DC Parameters: ')
st.dataframe(_df, 500, 600)
return _df
def get_resource_params(self):
_df = pd.read_csv(self.model_file)[29:76].reset_index(drop=True)
_df.columns = ['Resource_parameters']
_df[['Resource','Value']] = _df['Resource_parameters'].str.split("=",expand=True)
_df = _df[['Resource','Value']]
if self.streamlit is True:
st.subheader('Resources Parameters: ')
st.dataframe(_df, 500, 600)
return _df
def get_server_age(self):
_df = pd.read_csv(self.model_file)[79:85].reset_index(drop=True)
_df.columns = ['Age Dist']
_df[['Age (Years)','Server Count']] = _df['Age Dist'].str.split(" ",expand=True)
_df = _df[['Age (Years)','Server Count']]
if self.streamlit is True:
st.subheader('Age: ')
st.dataframe(_df, 500, 1000)
return _df
def get_server_replacement(self):
        '''Unclear what this value means at the moment.'''
_df = pd.read_csv(self.model_file)[85:86].reset_index(drop=True)
_df.columns = ['Server Replacements']
_df[['Count','Server Count']] = _df['Server Replacements'].str.split(" ",expand=True)
_df = _df[['Count']]
if self.streamlit is True:
st.subheader('Server Replacement: ')
st.dataframe(_df, 500, 1000)
return _df
def get_dc_costs(self):
        # This requires that the model has been run to create a new model txt file.
_df = pd.read_csv(self.model_file)[90:96].reset_index(drop=True)
_df.columns = ['DC Costs']
_df[['Cost_Component', 'Cost', 'Unit', 'Relative']] = _df['DC Costs'].str.split(" ",expand=True)
_df = _df[['Cost_Component','Cost','Unit', 'Relative']].iloc[1:,:]
_df['Cost_Component'] = _df['Cost_Component'].str[:-1]
_df.set_index('Cost_Component', inplace = True)
#_df.index = _df['Cost_Component']
if self.streamlit is True:
st.subheader('Data Center Costs: ')
st.dataframe(_df, 500, 1000)
return _df
def get_dc_tco(self):
        _df = pd.read_csv(self.model_file)
from __future__ import annotations
import typing
from abc import abstractmethod
if typing.TYPE_CHECKING:
from pandasgui.gui import PandasGui
from pandasgui.widgets.filter_viewer import FilterViewer
from pandasgui.widgets.dataframe_viewer import DataFrameViewer
from pandasgui.widgets.dataframe_explorer import DataFrameExplorer
from pandasgui.widgets.navigator import Navigator
from dataclasses import dataclass, field
from typing import Iterable, List, Union
from typing_extensions import Literal
import pandas as pd
from pandas import DataFrame
from PyQt5 import QtCore, QtWidgets
import traceback
from datetime import datetime
from pandasgui.utility import unique_name, in_interactive_console, refactor_variable, clean_dataframe, nunique, \
parse_cell, parse_all_dates, parse_date, get_movements
from pandasgui.constants import LOCAL_DATA_DIR
import os
from enum import Enum
import json
import inspect
import logging
import contextlib
logger = logging.getLogger(__name__)
# JSON file that stores persistent user preferences
preferences_path = os.path.join(LOCAL_DATA_DIR, 'preferences.json')
def read_saved_settings():
if not os.path.exists(preferences_path):
write_saved_settings({})
return {}
else:
try:
with open(preferences_path, 'r') as f:
saved_settings = json.load(f)
return saved_settings
except Exception as e:
logger.warning("Error occurred reading preferences. Resetting to defaults\n" + traceback.format_exc())
write_saved_settings({})
return {}
def write_saved_settings(settings):
with open(preferences_path, 'w') as f:
json.dump(settings, f)
class DictLike:
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
setattr(self, key, value)
class Setting(DictLike):
def __init__(self, label, value, description, dtype, persist):
self.label: str = label
self.value: any = value
self.description: str = description
self.dtype: Union[type(str), type(bool), Enum] = dtype
self.persist: bool = persist
def __setattr__(self, key, value):
try:
if self.persist:
settings = read_saved_settings()
settings[self.label] = value
write_saved_settings(settings)
except AttributeError:
# Get attribute error because of __setattr__ happening in __init__ before self.persist is set
pass
super().__setattr__(key, value)
DEFAULT_SETTINGS = {'editable': True,
'block': None,
'theme': 'light',
'auto_finish': True,
'refresh_statistics': True,
'render_mode': 'auto',
'aggregation': 'mean',
'title_format': "{name}: {title_columns}{title_dimensions}{names}{title_y}{title_z}{over_by}"
"{title_x} {selection}<br><sub>{groupings}{filters} {title_trendline}</sub>"
}
@dataclass
class SettingsStore(DictLike, QtCore.QObject):
settingsChanged = QtCore.pyqtSignal()
block: Setting
editable: Setting
theme: Setting
auto_finish: Setting
render_mode: Setting
aggregation: Setting
title_format: Setting
def __init__(self, **settings):
super().__init__()
saved_settings = read_saved_settings()
for setting_name in DEFAULT_SETTINGS.keys():
# Fill settings values if not provided
if setting_name not in settings.keys():
if setting_name in saved_settings.keys():
settings[setting_name] = saved_settings[setting_name]
else:
settings[setting_name] = DEFAULT_SETTINGS[setting_name]
if in_interactive_console():
# Don't block if in an interactive console (so you can view GUI and still continue running commands)
settings['block'] = False
else:
# If in a script, block or else the script will continue and finish without allowing GUI interaction
settings['block'] = True
self.block = Setting(label="block",
value=settings['block'],
description="Should GUI block code execution until closed?",
dtype=bool,
persist=False)
self.editable = Setting(label="editable",
value=settings['editable'],
description="Are table cells editable?",
dtype=bool,
persist=True)
self.theme = Setting(label="theme",
value=settings['theme'],
description="UI theme",
dtype=Literal['light', 'dark', 'classic'],
persist=True)
self.refresh_statistics = Setting(label="refresh_statistics",
value=settings['refresh_statistics'],
description="Recalculate statistics when data changes",
dtype=bool,
persist=True)
# Settings related to Grapher
self.auto_finish = Setting(label="auto_finish",
value=settings['auto_finish'],
description="Automatically renders plot after each drag and drop",
dtype=bool,
persist=True)
self.render_mode = Setting(label="render_mode",
value=settings['render_mode'],
description="render_mode",
dtype=Literal['auto', 'webgl', 'svg'],
persist=True)
self.aggregation = Setting(label="aggregation",
value=settings['aggregation'],
description="aggregation",
dtype=Literal['mean', 'median', 'min', 'max', 'sum', None],
persist=True)
self.title_format = Setting(label="title_format",
value=settings['title_format'],
description="title_format",
dtype=dict,
persist=True)
def reset_to_defaults(self):
for setting_name, setting_value in DEFAULT_SETTINGS.items():
self[setting_name].value = setting_value
def __repr__(self):
return '\n'.join([f"{key} = {val.value}" for key, val in self.__dict__.items()])
@dataclass
class Filter:
expr: str
enabled: bool
failed: bool
@dataclass
class HistoryItem:
comment: str
code: str
time: str
def __init__(self, comment, code):
self.comment = comment
self.code = code
self.time = datetime.now().strftime("%H:%M:%S")
# Use this decorator on PandasGuiStore or PandasGuiDataFrameStore to display a status bar message during a method run
def status_message_decorator(message):
def decorator(function):
def status_message_wrapper(self, *args, **kwargs):
if not (issubclass(type(self), PandasGuiStore) or issubclass(type(self), PandasGuiDataFrameStore)):
raise ValueError
full_kwargs = kwargs.copy()
# Allow putting method argument values in the status message by putting them in curly braces
args_spec = inspect.getfullargspec(function).args
args_spec.pop(0) # Removes self
for ix, arg_name in enumerate(args_spec):
# Need to check length because if the param has default value it may be in args_spec but not args
if ix < len(args):
full_kwargs[arg_name] = args[ix]
new_message = message
for arg_name in full_kwargs.keys():
new_message = new_message.replace('{' + arg_name + '}', str(full_kwargs[arg_name]))
if self.gui is not None:
original_status = self.gui.statusBar().currentMessage()
self.gui.statusBar().showMessage(new_message)
self.gui.statusBar().repaint()
QtWidgets.QApplication.instance().processEvents()
try:
result = function(self, *args, **kwargs)
finally:
self.gui.statusBar().showMessage(original_status)
self.gui.statusBar().repaint()
QtWidgets.QApplication.instance().processEvents()
else:
result = function(self, *args, **kwargs)
return result
return status_message_wrapper
return decorator
# Objects to display in the PandasGuiStore must inherit this class
class PandasGuiStoreItem:
def __init__(self):
self.name = None
@abstractmethod
def pg_widget(self):
raise NotImplementedError
class PandasGuiDataFrameStore(PandasGuiStoreItem):
"""
All methods that modify the data should modify self.df_unfiltered, then self.df gets computed from that
"""
def __init__(self, df: DataFrame, name: str = 'Untitled'):
super().__init__()
df = df.copy()
self.df: DataFrame = df
self.df_unfiltered: DataFrame = df
self.name = name
self.history: List[HistoryItem] = []
self.history_imports = {"import pandas as pd"}
# References to other object instances that may be assigned later
self.settings: SettingsStore = SETTINGS_STORE
self.store: Union[PandasGuiStore, None] = None
self.gui: Union[PandasGui, None] = None
self.dataframe_explorer: DataFrameExplorer = None
self.dataframe_viewer: Union[DataFrameViewer, None] = None
self.stats_viewer: Union[DataFrameViewer, None] = None
self.filter_viewer: Union[FilterViewer, None] = None
self.sorted_column_name: Union[str, None] = None
self.sorted_index_level: Union[int, None] = None
self.sort_state: Literal['Asc', 'Desc', 'None'] = 'None'
self.filters: List[Filter] = []
self.filtered_index_map = df.reset_index().index
# Statistics
self.column_statistics = None
self.row_statistics = None
self.statistics_outdated = True
self.data_changed()
@property
def sorted_column_ix(self):
try:
return list(self.df_unfiltered.columns).index(self.sorted_column_name)
except ValueError:
return None
def __setattr__(self, name, value):
if name == 'df':
value.pgdf = self
super().__setattr__(name, value)
def pg_widget(self):
return self.dataframe_explorer
@status_message_decorator("Refreshing statistics...")
def refresh_statistics(self, force=False):
if force or self.settings.refresh_statistics.value:
df = self.df
self.column_statistics = pd.DataFrame({
"Type": df.dtypes.astype(str),
"Count": df.count(),
"N Unique": nunique(df),
"Mean": df.mean(numeric_only=True),
"StdDev": df.std(numeric_only=True),
"Min": df.min(numeric_only=True),
"Max": df.max(numeric_only=True),
}, index=df.columns
)
df = self.df.transpose()
df_numeric = self.df.select_dtypes('number').transpose()
self.row_statistics = pd.DataFrame({
# "Type": df.dtypes.astype(str),
# "Count": df.count(),
# "N Unique": nunique(df),
# "Mean": df_numeric.mean(numeric_only=True),
# "StdDev": df_numeric.std(numeric_only=True),
# "Min": df_numeric.min(numeric_only=True),
"Max": df_numeric.max(numeric_only=True),
}, index=df.columns
)
if self.dataframe_explorer is not None:
self.dataframe_explorer.statistics_viewer.refresh_statistics()
###################################
# Code history
@status_message_decorator("Generating code export...")
def code_export(self):
if len(self.history) == 0 and not any([filt.enabled for filt in self.filters]):
return f"# No actions have been recorded yet on this DataFrame ({self.name})"
code_history = "# 'df' refers to the DataFrame passed into 'pandasgui.show'\n\n"
# Add imports to setup
code_history += '\n'.join(self.history_imports) + '\n\n'
for history_item in self.history:
code_history += f'# {history_item.comment}\n'
code_history += history_item.code
code_history += "\n\n"
if any([filt.enabled for filt in self.filters]):
code_history += f"# Filters\n"
for filt in self.filters:
if filt.enabled:
code_history += f"df = df.query('{filt.expr}')\n"
return code_history
def add_history_item(self, comment, code):
history_item = HistoryItem(comment, code)
self.history.append(history_item)
if self.gui is not None:
self.gui.update_code_export()
###################################
# Editing cell data
@status_message_decorator("Applying cell edit...")
def edit_data(self, row, col, text):
column_dtype = self.df.dtypes[col].type
# type should always be str when being called from PyQt GUI but someone might call this directly
if type(text) == str:
value = parse_cell(text, column_dtype)
# Map the row number in the filtered df (which the user interacts with) to the unfiltered one
row = self.filtered_index_map[row]
old_val = self.df_unfiltered.iat[row, col]
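        # NaN != NaN, so also compare with pd.isna to avoid recording a spurious
        # edit when both the old and new values are missing.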
if old_val != value and not (pd.isna(old_val) and pd.isna(value)):
self.df_unfiltered.iat[row, col] = value
self.apply_filters()
self.add_history_item("edit_data",
f"df.iat[{row}, {col}] = {repr(value)}")
@status_message_decorator("Pasting data...")
def paste_data(self, top_row, left_col, df_to_paste):
new_df = self.df_unfiltered.copy()
# Not using iat here because it won't work with MultiIndex
for i in range(df_to_paste.shape[0]):
for j in range(df_to_paste.shape[1]):
value = df_to_paste.iloc[i, j]
new_df.at[self.df.index[top_row + i],
self.df.columns[left_col + j]] = value
self.df_unfiltered = new_df
self.apply_filters()
self.add_history_item("paste_data", inspect.cleandoc(
f"""
df_to_paste = pd.DataFrame({df_to_paste.to_dict(orient='list')})
for i in range(df_to_paste.shape[0]):
for j in range(df_to_paste.shape[1]):
value = df_to_paste.iloc[i, j]
df.at[df.index[{top_row} + i],
df.columns[{left_col} + j]] = value
"""))
###################################
# Changing columns
@status_message_decorator("Deleting column...")
def delete_column(self, ix: int):
col_name = self.df_unfiltered.columns[ix]
self.df_unfiltered = self.df_unfiltered.drop(col_name, axis=1)
# Need to inform the PyQt model too so column widths properly shift
self.dataframe_viewer._remove_column(ix)
self.add_history_item("delete_column",
f"df = df.drop('{col_name}', axis=1)")
self.apply_filters()
@status_message_decorator("Moving columns...")
def move_column(self, src: int, dest: int):
cols = list(self.df_unfiltered.columns)
cols.insert(dest, cols.pop(src))
self.df_unfiltered = self.df_unfiltered.reindex(cols, axis=1)
self.add_history_item("move_column",
(f"cols = list(df.columns)"
f"cols.insert({dest}, cols.pop({src}))"
f"df = df.reindex(cols, axis=1)"))
self.dataframe_viewer.setUpdatesEnabled(False)
# Need to inform the PyQt model too so column widths properly shift
self.dataframe_viewer._move_column(src, dest)
self.apply_filters()
self.dataframe_viewer.setUpdatesEnabled(True)
@status_message_decorator("Reordering columns...")
def reorder_columns(self, columns: List[str]):
if sorted(list(columns)) != sorted(list(self.df_unfiltered.columns)):
raise ValueError("Provided column names do not match DataFrame")
original_columns = list(self.df_unfiltered.columns)
self.df_unfiltered = self.df_unfiltered.reindex(columns=columns)
self.dataframe_viewer.setUpdatesEnabled(False)
# Move columns around in TableView to maintain column widths
for (src, dest) in get_movements(original_columns, columns):
self.dataframe_viewer._move_column(src, dest, refresh=False)
self.apply_filters()
self.dataframe_viewer.setUpdatesEnabled(True)
self.add_history_item("reorder_columns",
f"df = df.reindex(columns={columns})")
###################################
# Sorting
@status_message_decorator("Sorting column...")
def sort_column(self, ix: int, next_sort_state: Literal['Asc', 'Desc', 'None'] = None):
col_name = self.df_unfiltered.columns[ix]
# Determine next sorting state by current state
if next_sort_state is None:
# Clicked an unsorted column
if ix != self.sorted_column_ix:
next_sort_state = 'Asc'
# Clicked a sorted column
elif ix == self.sorted_column_ix and self.sort_state == 'Asc':
next_sort_state = 'Desc'
# Clicked a reverse sorted column - reset to sorted by index
elif ix == self.sorted_column_ix:
next_sort_state = 'None'
if next_sort_state == 'Asc':
self.df_unfiltered = self.df_unfiltered.sort_values(col_name, ascending=True, kind='mergesort')
self.sorted_column_name = self.df_unfiltered.columns[ix]
self.sort_state = 'Asc'
self.add_history_item("sort_column",
f"df = df.sort_values('{self.df_unfiltered.columns[ix]}', ascending=True, kind='mergesort')")
elif next_sort_state == 'Desc':
self.df_unfiltered = self.df_unfiltered.sort_values(col_name, ascending=False, kind='mergesort')
self.sorted_column_name = self.df_unfiltered.columns[ix]
self.sort_state = 'Desc'
self.add_history_item("sort_column",
f"df = df.sort_values('{self.df_unfiltered.columns[ix]}', ascending=False, kind='mergesort')")
elif next_sort_state == 'None':
self.df_unfiltered = self.df_unfiltered.sort_index(ascending=True, kind='mergesort')
self.sorted_column_name = None
self.sort_state = 'None'
self.add_history_item("sort_column",
"df = df.sort_index(ascending=True, kind='mergesort')")
self.sorted_index_level = None
self.apply_filters()
@status_message_decorator("Sorting index...")
def sort_index(self, ix: int):
# Clicked an unsorted index level
if ix != self.sorted_index_level:
self.df_unfiltered = self.df_unfiltered.sort_index(level=ix, ascending=True, kind='mergesort')
self.sorted_index_level = ix
self.sort_state = 'Asc'
self.add_history_item("sort_index",
f"df = df.sort_index(level={ix}, ascending=True, kind='mergesort')")
# Clicked a sorted index level
elif ix == self.sorted_index_level and self.sort_state == 'Asc':
self.df_unfiltered = self.df_unfiltered.sort_index(level=ix, ascending=False, kind='mergesort')
self.sorted_index_level = ix
self.sort_state = 'Desc'
self.add_history_item("sort_index",
f"df = df.sort_index(level={ix}, ascending=False, kind='mergesort')")
# Clicked a reverse sorted index level - reset to sorted by full index
elif ix == self.sorted_index_level:
self.df_unfiltered = self.df_unfiltered.sort_index(ascending=True, kind='mergesort')
self.sorted_index_level = None
self.sort_state = 'None'
self.add_history_item("sort_index",
"df = df.sort_index(ascending=True, kind='mergesort')")
        self.sorted_column_name = None
self.apply_filters()
def change_column_type(self, ix: int, type):
name = self.df_unfiltered.columns[ix]
self.df_unfiltered[name] = self.df_unfiltered[name].astype(type)
self.apply_filters()
self.add_history_item("change_column_type",
f"df[{name}] = df[{name}].astype({type})")
###################################
# Filters
def any_filtered(self):
return any(filt.enabled for filt in self.filters)
def add_filter(self, expr: str, enabled=True):
filt = Filter(expr=expr, enabled=enabled, failed=False)
self.filters.append(filt)
self.apply_filters()
def remove_filter(self, index: int):
self.filters.pop(index)
self.apply_filters()
def edit_filter(self, index: int, expr: str):
filt = self.filters[index]
filt.expr = expr
filt.failed = False
self.apply_filters()
def toggle_filter(self, index: int):
self.filters[index].enabled = not self.filters[index].enabled
self.apply_filters()
@status_message_decorator("Applying filters...")
def apply_filters(self):
df = self.df_unfiltered.copy()
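        # A temporary positional index column lets us recover, after filtering,
        # which unfiltered row each surviving row came from.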
df['_temp_range_index'] = df.reset_index().index
for ix, filt in enumerate(self.filters):
if filt.enabled and not filt.failed:
try:
df = df.query(filt.expr)
# Handle case where filter returns only one row
if isinstance(df, pd.Series):
df = df.to_frame().T
except Exception as e:
self.filters[ix].failed = True
logger.exception(e)
# self.filtered_index_map is used elsewhere to map unfiltered index to filtered index
self.filtered_index_map = df['_temp_range_index'].reset_index(drop=True)
df = df.drop('_temp_range_index', axis=1)
self.df = df
self.data_changed()
# Convert all columns to datetime where possible
def parse_all_dates(self):
df = self.df_unfiltered
converted_names = []
dtypes_old = df.dtypes
df = parse_all_dates(df)
dtypes_new = df.dtypes
for ix in range(len(dtypes_new)):
col_name = df.columns[ix]
# Pandas is sometimes buggy when comparing dtypes
try:
if dtypes_old[ix] != dtypes_new[ix]:
converted_names.append(str(col_name))
except:
pass
if converted_names:
logger.info(f"In {self.name}, converted columns to datetime: {', '.join(converted_names)}")
else:
logger.warning(f"In {self.name}, unable to parse any columns as datetime")
self.df_unfiltered = df
self.apply_filters()
# Convert a single column to date
def parse_date(self, ix):
df = self.df_unfiltered
name = list(df.columns)[ix]
dtype_old = df[name].dtype
df[name] = parse_date(df[name])
dtype_new = df[name].dtype
if dtype_old != dtype_new:
logger.info(f"In {self.name}, converted {name} to datetime")
else:
logger.warning(f"In {self.name}, unable to convert {name} to datetime")
self.df_unfiltered = df
self.apply_filters()
###################################
# Other
def data_changed(self):
self.refresh_ui()
self.refresh_statistics()
# Remake Grapher plot
if self.dataframe_explorer is not None:
self.dataframe_explorer.grapher.on_dragger_finished()
# Refresh PyQt models when the underlying pgdf is changed in anyway that needs to be reflected in the GUI
def refresh_ui(self):
self.models = []
if self.filter_viewer is not None:
self.models += [self.filter_viewer.list_model]
for model in self.models:
model.beginResetModel()
model.endResetModel()
if self.dataframe_viewer is not None:
self.dataframe_viewer.refresh_ui()
@staticmethod
def cast(df: Union[PandasGuiDataFrameStore, pd.DataFrame, pd.Series, Iterable]):
if isinstance(df, PandasGuiDataFrameStore):
return df
if isinstance(df, pd.DataFrame):
return PandasGuiDataFrameStore(df.copy())
elif isinstance(df, pd.Series):
return PandasGuiDataFrameStore(df.to_frame())
else:
try:
return PandasGuiDataFrameStore(pd.DataFrame(df))
except:
raise TypeError(f"Could not convert {type(df)} to DataFrame")
@dataclass
class PandasGuiStore:
"""This class stores all state data of the PandasGUI main GUI.
Attributes:
settings Settings as defined in SettingsStore
data A dict of PandasGuiDataFrameStore instances which wrap DataFrames. These show up in left nav
data A dict of other widgets that can show up in the left nav such as JsonViewer and FigureViewer
gui A reference to the PandasGui widget instance
navigator A reference to the Navigator widget instance
selected_pgdf The PandasGuiDataFrameStore currently selected in the nav
"""
settings: Union[SettingsStore, None] = None
data: typing.OrderedDict[str, Union[PandasGuiStoreItem, PandasGuiDataFrameStore]] = field(default_factory=dict)
gui: Union[PandasGui, None] = None
navigator: Union[Navigator, None] = None
selected_pgdf: Union[PandasGuiDataFrameStore, None] = None
def __post_init__(self):
self.settings = SETTINGS_STORE
###################################
# IPython magic
@status_message_decorator("Executing IPython command...")
def eval_magic(self, line):
dataframes_affected = []
command = line
for name in self.data.keys():
command = refactor_variable(command, name, f"self.data['{name}'].df_unfiltered")
if name in command:
dataframes_affected.append(name)
exec(command)
for name in dataframes_affected:
self.data[name].apply_filters()
self.data[name].add_history_item("iPython magic",
refactor_variable(line, name, 'df'))
return line
###################################
# Use this context to display a status message for a block. self should be a PandasGuiStore or PandasGuiDataFrameStore
@contextlib.contextmanager
def status_message_context(self, message):
if self.gui is not None:
original_status = self.gui.statusBar().currentMessage()
self.gui.statusBar().showMessage(message)
self.gui.statusBar().repaint()
QtWidgets.QApplication.instance().processEvents()
try:
yield
finally:
self.gui.statusBar().showMessage(original_status)
self.gui.statusBar().repaint()
QtWidgets.QApplication.instance().processEvents()
###################################
def add_item(self, item: PandasGuiStoreItem,
name: str = "Untitled", shape: str = ""):
# Add it to store and create widgets
self.data[name] = item
self.gui.stacked_widget.addWidget(item.pg_widget())
# Add to nav
nav_item = QtWidgets.QTreeWidgetItem(self.navigator, [name, shape])
self.navigator.itemSelectionChanged.emit()
self.navigator.setCurrentItem(nav_item)
self.navigator.apply_tree_settings()
def remove_item(self, name_or_index):
if type(name_or_index) == int:
ix = name_or_index
name = list(self.data.keys())[ix]
elif type(name_or_index) == str:
name = name_or_index
else:
raise ValueError
item = self.data[name]
if isinstance(item, PandasGuiDataFrameStore):
widget = item.dataframe_explorer
else:
widget = item
self.data.pop(name)
self.gui.navigator.remove_item(name)
self.gui.stacked_widget.removeWidget(widget)
@status_message_decorator("Adding DataFrame...")
def add_dataframe(self, pgdf: Union[DataFrame, PandasGuiDataFrameStore],
name: str = "Untitled"):
name = unique_name(name, self.get_dataframes().keys())
with self.status_message_context("Adding DataFrame (Creating DataFrame store)..."):
pgdf = PandasGuiDataFrameStore.cast(pgdf)
pgdf.settings = self.settings
pgdf.name = name
pgdf.store = self
pgdf.gui = self.gui
with self.status_message_context("Cleaning DataFrame..."):
pgdf.df = clean_dataframe(pgdf.df, name)
pgdf.data_changed()
if pgdf.dataframe_explorer is None:
from pandasgui.widgets.dataframe_explorer import DataFrameExplorer
pgdf.dataframe_explorer = DataFrameExplorer(pgdf)
# Add to nav
shape = pgdf.df.shape
shape = f"{shape[0]:,} x {shape[1]:,}"
self.add_item(pgdf, name, shape)
def remove_dataframe(self, name_or_index):
self.remove_item(name_or_index)
@status_message_decorator('Importing file "{path}"...')
def import_file(self, path):
if not os.path.isfile(path):
logger.warning("Path is not a file: " + path)
elif path.endswith(".csv"):
filename = os.path.split(path)[1].split('.csv')[0]
df = pd.read_csv(path, engine='python')
self.add_dataframe(df, filename)
elif path.endswith(".xlsx"):
            filename = os.path.split(path)[1].split('.xlsx')[0]
df_dict = pd.read_excel(path, sheet_name=None)
for sheet_name in df_dict.keys():
df_name = f"{filename} - {sheet_name}"
self.add_dataframe(df_dict[sheet_name], df_name)
elif path.endswith(".parquet"):
filename = os.path.split(path)[1].split('.parquet')[0]
df = pd.read_parquet(path, engine='pyarrow')
self.add_dataframe(df, filename)
elif path.endswith(".json"):
filename = os.path.split(path)[1].split('.json')[0]
with open(path) as f:
data = json.load(f)
from pandasgui.widgets.json_viewer import JsonViewer
jv = JsonViewer(data)
self.add_item(jv, filename)
elif path.endswith(".pkl"):
filename = os.path.split(path)[1].split('.pkl')[0]
            df = pd.read_pickle(path)
            self.add_dataframe(df, filename)
import os
import pytest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import pandas as pd
import numpy as np
from nnaps import fileio
from sklearn import preprocessing
from keras.layers import Dense, Input, Activation
from keras.models import Model, Sequential
class TestProcessorConversion:
def scaler2dict2scaler_test(self, scaler, data):
scaler.fit(data)
processors = {'processor': scaler}
processor_dict = fileio.processors2dict(processors)
processors_new = fileio.dict2processors(processor_dict)
new_scaler = processors_new['processor']
scaled_data = scaler.transform(data)
new_scaled_data = new_scaler.transform(data)
inv_scaled_data = new_scaler.inverse_transform(scaled_data)
np.testing.assert_array_equal(scaled_data, new_scaled_data,
err_msg="loaded scaler does not transform the same as original scaler")
np.testing.assert_almost_equal(data, inv_scaled_data, err_msg="data transformed by original and inverse" +
" transformed by loaded scaler does not equal original data.")
def test_standardScaler(self):
data = np.random.normal(10, 3, size=10)
data = np.array([data]).T
std_scaler = preprocessing.StandardScaler()
self.scaler2dict2scaler_test(std_scaler, data)
def test_robustScaler(self):
data = np.random.normal(10, 3, size=100)
data = np.array([data]).T
rob_scaler = preprocessing.RobustScaler()
self.scaler2dict2scaler_test(rob_scaler, data)
def test_minMaxScaler(self):
data = np.random.normal(10, 3, size=100)
data = np.array([data]).T
minmax_scaler = preprocessing.MinMaxScaler()
self.scaler2dict2scaler_test(minmax_scaler, data)
def test_maxAbsScaler(self):
data = np.random.normal(10, 3, size=100)
data = np.array([data]).T
maxabs_scaler = preprocessing.MaxAbsScaler()
self.scaler2dict2scaler_test(maxabs_scaler, data)
def test_oneHotEncoder(self):
data_int = np.random.randint(0, 3, size=10)
data = np.chararray(data_int.shape, itemsize=5)
data[:] = 'empty'
data = np.where(data_int == 0, 'type1', data)
data = np.where(data_int == 1, 'type2', data)
data = np.where(data_int == 2, 'type3', data)
data = np.where(data_int == 3, 'type4', data)
data = np.array([data]).T
encoder = preprocessing.OneHotEncoder()
encoder.fit(data)
processors = {'encoder': encoder}
processor_dict = fileio.processors2dict(processors)
processors = fileio.dict2processors(processor_dict)
encoder_new = processors['encoder']
scaled_data = encoder.transform(data)
scaled_data_new = encoder_new.transform(data)
assert np.all(scaled_data.nonzero()[0] == scaled_data_new.nonzero()[0]),\
"loaded encoder does not transform the same as original encoder"
assert np.all(scaled_data.nonzero()[1] == scaled_data_new.nonzero()[1]), \
"loaded encoder does not transform the same as original encoder"
scaled_data = encoder.transform(data)
inv_data = encoder_new.inverse_transform(scaled_data)
np.testing.assert_equal(data, inv_data, err_msg="data transformed by original and inverse transformed by" +
" loaded encoder does not equal original data.")
# ----------------------------------------------------------------------------------------------------------------------
@pytest.fixture(scope='function')
def test_scaler():
def test_scaler_helper(scaler):
data = pd.DataFrame(np.random.randn(100, 4), columns=list('ABCD'))
scaler.fit(data)
processors = {'scaler': scaler}
processor_dict = fileio.processors2dict(processors)
try:
fileio.save('test_scaler.h5', processor_dict)
processor_dict_new = fileio.load('test_scaler.h5', unpack=False)
finally:
if os.path.isfile('test_scaler.h5'):
os.remove('test_scaler.h5')
processors_new = fileio.dict2processors(processor_dict_new)
scaler_new = processors_new['scaler']
scaled_data = scaler.transform(data)
scaled_data_new = scaler_new.transform(data)
assert np.all(scaled_data.nonzero()[0] == scaled_data_new.nonzero()[0]), \
"{}: loaded scaler does not transform the same as original scaler".format(scaler.__class__)
assert np.all(scaled_data.nonzero()[1] == scaled_data_new.nonzero()[1]), \
"{}: loaded scaler does not transform the same as original scaler".format(scaler.__class__)
return test_scaler_helper
class TestSaveLoadScalers:
@pytest.mark.usefixtures("test_scaler")
def test_saveload_standardScaler(self, test_scaler):
scaler = preprocessing.StandardScaler()
test_scaler(scaler)
@pytest.mark.usefixtures("test_scaler")
def test_saveload_robustScaler(self, test_scaler):
scaler = preprocessing.RobustScaler()
test_scaler(scaler)
@pytest.mark.usefixtures("test_scaler")
def test_saveload_minMaxScaler(self, test_scaler):
scaler = preprocessing.MinMaxScaler()
test_scaler(scaler)
@pytest.mark.usefixtures("test_scaler")
def test_saveload_maxAbsScaler(self, test_scaler):
scaler = preprocessing.MaxAbsScaler()
test_scaler(scaler)
# ----------------------------------------------------------------------------------------------------------------------
class TestSaveLoadEncoders:
def test_saveLoad_processors(self):
data = dict(preprocessor='TestEncoder',
kwargs={ # OneHot encoder
'_n_values': 'auto',
'_categories': 'auto',
'_categorical_features': 'all',
'_legacy_mode': False,
'categories_': [np.array(['type1', 'type2', 'type3'], dtype='<U5')],
# Standard Scaler
'mean_': np.array([10.26388025]),
'var_': np.array([8.39983959]),
'scale1_': np.array([2.89824768]),
# Robust Scaler
'center_': np.array([9.99513811]),
'scale2_': np.array([3.99362846]),
# MinMax Scaler
'min_': np.array([-0.09978182]),
'data_min_': np.array([1.69929507]),
'data_max_': np.array([18.72940234]),
'data_range_': np.array([17.03010727]),
# MaxAbs Scaler
'max_abs_': np.array([18.72940234]),
})
try:
fileio.save('test.h5', data)
data_new = fileio.load('test.h5', unpack=False)
finally:
if os.path.isfile('test.h5'):
os.remove('test.h5')
keys = list(data['kwargs'].keys())
keys.remove('categories_')
for key in keys:
assert data['kwargs'][key] == data_new['kwargs'][key]
assert data_new['kwargs']['categories_'][0].dtype == '|S5', \
"hdf5 saving check when dealing with arrays of strings:\n" + \
"When saving a numpy array with strings, the returned type should be '|S..'\n" + \
"got dtype: {}".format(data_new['kwargs']['categories_'][0].dtype)
np.testing.assert_equal(data['kwargs']['categories_'][0],
np.array(data_new['kwargs']['categories_'][0], dtype='<U5'))
def test_saveload_onehotencoder_dtype_char(self):
data_int = np.random.randint(0, 3, size=10)
data = np.chararray(data_int.shape, itemsize=5)
data[:] = 'empty'
data = np.where(data_int == 0, 'type1', data)
data = np.where(data_int == 1, 'type2', data)
data = np.where(data_int == 2, 'type3', data)
data = np.where(data_int == 3, 'type4', data)
data = np.array([data]).T
encoder = preprocessing.OneHotEncoder()
encoder.fit(data)
processors = {'encoder': encoder}
processor_dict = fileio.processors2dict(processors)
try:
fileio.save('test_oneHot.h5', processor_dict)
processor_dict_new = fileio.load('test_oneHot.h5', unpack=False)
finally:
if os.path.isfile('test_oneHot.h5'):
os.remove('test_oneHot.h5')
processors_new = fileio.dict2processors(processor_dict_new)
encoder_new = processors_new['encoder']
scaled_data = encoder.transform(data)
scaled_data_new = encoder_new.transform(data)
assert np.all(scaled_data.nonzero()[0] == scaled_data_new.nonzero()[0]), \
"loaded encoder does not transform the same as original encoder"
assert np.all(scaled_data.nonzero()[1] == scaled_data_new.nonzero()[1]), \
"loaded encoder does not transform the same as original encoder"
scaled_data = encoder.transform(data)
inv_data = encoder_new.inverse_transform(scaled_data)
np.testing.assert_equal(data, inv_data,
err_msg="data transformed by original and inverse transformed by loaded encoder" +
" does not equal original data.")
def test_saveload_onehotencoder_dtype_object(self):
data_int = np.random.randint(0, 3, size=10)
data = np.chararray(data_int.shape, itemsize=5)
data[:] = 'empty'
data = np.where(data_int == 0, 'type1', data)
data = np.where(data_int == 1, 'type2', data)
data = np.where(data_int == 2, 'type3', data)
data = np.where(data_int == 3, 'type4', data)
data = np.array([data], dtype='object').T
encoder = preprocessing.OneHotEncoder()
encoder.fit(data)
processors = {'encoder': encoder}
processor_dict = fileio.processors2dict(processors)
try:
fileio.save('test_oneHot.h5', processor_dict)
processor_dict_new = fileio.load('test_oneHot.h5', unpack=False)
finally:
if os.path.isfile('test_oneHot.h5'):
os.remove('test_oneHot.h5')
processors_new = fileio.dict2processors(processor_dict_new)
encoder_new = processors_new['encoder']
scaled_data = encoder.transform(data)
scaled_data_new = encoder_new.transform(data)
assert np.all(scaled_data.nonzero()[0] == scaled_data_new.nonzero()[0]), \
"loaded encoder does not transform the same as original encoder"
assert np.all(scaled_data.nonzero()[1] == scaled_data_new.nonzero()[1]), \
"loaded encoder does not transform the same as original encoder"
scaled_data = encoder.transform(data)
inv_data = encoder_new.inverse_transform(scaled_data)
np.testing.assert_equal(data, inv_data,
err_msg="data transformed by original and inverse transformed by loaded encoder" +
" does not equal original data.")
class TestSaveLoadModel:
def test_saveload_model(self):
# make and train a very small model
inputs = Input(shape=(2,))
dense1 = Dense(10, activation='relu', name='FC_1')(inputs)
dense2 = Dense(5, activation='relu', name='FC_2')(dense1)
output1 = Dense(1, name='output1')(dense2)
output2 = Dense(1, name='output2')(dense2)
output3 = Dense(2, activation='softmax', name='output3')(dense2)
model = Model(inputs, [output1, output2, output3])
# v1 = np.random.normal(0, 2, 100)
# v2 = np.random.normal(0.3, 0.5, 100)
# v3 = np.random.normal(-0.3, 0.5, 100)
# X = np.array([v1, v2]).T
# y = [v3, v3]
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
# history = model.fit(X, y, epochs=1, batch_size=20, shuffle=True)
try:
fileio.safe_model(model, {}, [], [], [], {}, 'test.h5')
model_new, _, _, _, _, _, _ = fileio.load_model('test.h5')
finally:
os.remove('test.h5')
assert model.to_json() == model_new.to_json()
def test_saveload_history(self):
# the save function NEEDS a model to work
model = Sequential([
Dense(32, input_shape=(5,)),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
data = {'M1final_mae': [0.3, 0.2], 'val_M1final_mae': [0.31, 0.21], 'M1final_loss': [1.5, 1.3],
'val_M1final_loss': [1.6, 1.4], 'training_run': [1, 1]}
        history = pd.DataFrame(data=data)
"""Script of my solution to DrivenData Modeling Women's Health Care Decisions
Use this script in the following way:
python solution.py <name-of-submission>
Argument is optional, the script will assign default name.
"""
from __future__ import division
import sys
import pdb
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn import multiclass
from XGBoostClassifier import XGBoostClassifier
np.random.seed(17411)
def multiclass_log_loss(y_true, y_prob, eps=1e-15):
"""Multi class version of Logarithmic Loss metric.
https://www.kaggle.com/wiki/MultiClassLogLoss
Parameters
----------
y_true : array, shape = [n_samples, n_classes]
y_prob : array, shape = [n_samples, n_classes]
Returns
-------
loss : float
"""
predictions = np.clip(y_prob, eps, 1 - eps)
rows = y_prob.shape[0]
cols = y_prob.shape[1]
vsota = np.sum(y_true * np.log(predictions) + (1-y_true) * np.log(1-predictions))
vsota = vsota / cols
return -1.0 / rows * vsota
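# Minimal usage sketch (hypothetical values): y_true is one-hot encoded with the
# same shape as the predicted probabilities, e.g.
#   multiclass_log_loss(np.array([[1, 0], [0, 1]]), np.array([[0.9, 0.1], [0.2, 0.8]]))
# yields a small positive loss; perfect predictions drive it toward zero.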
def load_train_data(path=None, train_size=0.8):
train_values = pd.read_csv('data/processed_train.csv')
    train_labels = pd.read_csv('data/train_labels.csv')
# -*- coding: utf-8 -*-
"""
Web crawler / scraping utilities
"""
import numpy as np
import time
import uuid
import sys
from mllib.utils import seleniumutil as util
import re
import lxml.html
import pandas as pd
from lxml import etree
from urllib.request import urlopen, Request
import requests
from pandas.compat import StringIO
from mllib.utils import config_vars as CONFIG
import random
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Nested query: extract all text from a nested HTML element (and its descendants) in one pass and return it as a single string
from selenium.common.exceptions import WebDriverException
def scroll_mouse(driver):
try:
js1 = "window.scrollTo(0,250)"
js2 = "window.scrollTo(250,0)"
js3 = "window.scrollTo(0,document.body.scrollHeight)"
js_window_height = driver.execute_script('return document.body.scrollHeight')
driver.execute_script(js1)
time.sleep(1)
driver.execute_script(js2)
time.sleep(1)
driver.execute_script(js3)
time.sleep(1)
except WebDriverException:
        print('Page scroll failed')
def get_all_children_elements_chrome(element):
result = ''
all_infos = util.find_element_by_xpath(element, './descendant-or-self::node()/text()')
for s in all_infos:
#print('type(s)', type(s))
#print('s', s)
result = result + ' ' + s.strip()
#print('result: ', result)
return result
def get_all_children_elements(element):
result = ''
all_infos = element[0].xpath('./descendant-or-self::node()/text()')
for s in all_infos:
#print('type(s)', type(s))
#print('s', s)
result = result + ' ' + s.strip()
#print('result: ', result)
return result
# Sina Finance data
def get_sina_finance_data(retry_count = 3, pause = 0.01, dataArr=pd.DataFrame(), pageNo=1, endPage=3):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(CONFIG.SINA_URL%(pageNo), headers=CONFIG.HEADERS)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
sarr = [etree.tostring(node).decode('utf-8') for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = CONFIG.SINA_COLUMNS
dataArr = dataArr.append(df, ignore_index=True)
#a[last()]/@onclick
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0 and int(pageNo) < endPage:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return get_sina_finance_data(retry_count, pause, dataArr, pageNo=pageNo)
else:
return dataArr
except Exception as e:
print(e)
# Lianjia rental listings data
def get_lianjia_rent_data(retry_count = 3, pause = 0.01, dataArr=[], pageNo=1, endPage=3):
for _ in range(retry_count):
time.sleep(pause)
try:
request_1 = Request(CONFIG.LIANJIA_URL%(pageNo))
text_1 = urlopen(request_1, timeout=10).read()
text_1 = text_1.decode('utf-8')
html_1 = lxml.html.parse(StringIO(text_1))
res_1 = html_1.xpath("//*[@id=\"house-lst\"]/li/div[@class=\"info-panel\"]")
links_1 = html_1.xpath("//*[@id=\"house-lst\"]/li/div[@class=\"info-panel\"]/h2/a/@href")
for link in links_1:
request_2 = Request(link)
text_2 = urlopen(request_2, timeout=10).read()
text_2 = text_2.decode('utf-8')
                html_2 = lxml.html.parse(StringIO(text_2))
# GA CUSTOMER REVENUE COMPETITION
# Updated kernel (11/11) with v2 files
# Read and preprocess all columns, except hits.
import gc
import os
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import json
import time
from ast import literal_eval
def load_df(file_name = 'train_v2.csv', nrows = None):
"""Read csv and convert json columns."""
USE_COLUMNS = [
'channelGrouping', 'date', 'device', 'fullVisitorId', 'geoNetwork',
'socialEngagementType', 'totals', 'trafficSource', 'visitId',
'visitNumber', 'visitStartTime', 'customDimensions',
#'hits'
]
JSON_COLUMNS = ['device', 'geoNetwork', 'totals', 'trafficSource']
df = pd.read_csv('../input/{}'.format(file_name),
converters={column: json.loads for column in JSON_COLUMNS},
dtype={'fullVisitorId': 'str'}, nrows=nrows, usecols=USE_COLUMNS)
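    # Flatten each JSON column into "<column>_<subcolumn>" columns and merge them
    # back on the row index, dropping the original nested column.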
for column in JSON_COLUMNS:
column_as_df = json_normalize(df[column])
column_as_df.columns = [f"{column}_{subcolumn}" for subcolumn in column_as_df.columns]
df = df.drop(column, axis=1).merge(column_as_df, right_index=True, left_index=True)
# Normalize customDimensions
df['customDimensions']=df['customDimensions'].apply(literal_eval)
df['customDimensions']=df['customDimensions'].str[0]
df['customDimensions']=df['customDimensions'].apply(lambda x: {'index':np.NaN,'value':np.NaN} if pd.isnull(x) else x)
    column_as_df = json_normalize(df['customDimensions'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 10:20:22 2021
@author: juliedson
"""
# Generates pandemic charts for several cities in Brazil
# import packages
import pandas as pd # dataframes
import matplotlib.pyplot as plt # plotting data
import seaborn as sns
# Inputting the city name
print("\033[1;32m\nOi! A coisa tá difícil, eu sei... \nMas juntos vamos passar por tudo isso!!! :D\033[m"\
"\n\nEscreva o nome da cidade no seguinte formato:\nCidade/UF",\
"\nExemplos: Natércia/MG; São Paulo/SP; Pouso Alegre/MG\n")
city = str(input('Nome do município: '))  # e.g. 'Heliodora/MG'  # TODO: build a search mechanism
city_name = city.replace('/', '_')
print('\n\033[1;37mEntão vamos dar uma olhada em como está \033[1;36m'+str(city.replace('/', ' - '))+'...\033[m')
# Choosing a path to save the figures
#wanna_path = str(input('Deseja escolher alguma pasta para salvar? [S/N] ')).upper()[0]
#if wanna_path in 'SY':
# path = str(input('Digite aqui o caminho: '))
#else:
# path=''
df_cities = pd.read_csv("https://github.com/wcota/covid19br/blob/master/cases-brazil-cities-time.csv.gz?raw=true", compression='gzip')
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from operational_analysis.toolkits import power_curve
from operational_analysis.toolkits.power_curve.parametric_forms import *
noise = 0.1
class TestPowerCurveFunctions(unittest.TestCase):
def setUp(self):
np.random.seed(42)
params = [1300, -7, 11, 2, 0.5]
self.x = pd.Series(np.random.random(100) * 30)
self.y = pd.Series(logistic5param(self.x, *params) + np.random.random(100) * noise)
# power curve source: https://github.com/NREL/turbine-models/blob/master/Offshore/2020ATB_NREL_Reference_15MW_240.csv
self.nrel_15mw_wind = pd.Series(np.arange(4, 26))
self.nrel_15mw_power = pd.Series(np.array([720, 1239, 2271, 3817, 5876, 8450, 11536, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 15000, 1500]))
def test_IEC(self):
# Create test data using logistic5param form
curve = power_curve.IEC(self.x, self.y)
y_pred = curve(self.x)
# Does the IEC power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_IEC_with_bounds(self):
# Create the power curve with bounds at 4m/s adn 25m/s and bin width from power curve of 1m/s
cut_in = 4
cut_out = 25
curve = power_curve.IEC(self.nrel_15mw_wind, self.nrel_15mw_power, windspeed_start=cut_in, windspeed_end=cut_out, bin_width=1)
# Create the test data
test_windspeeds = np.arange(0, 31)
test_power = curve(test_windspeeds)
# Test all windspeeds outside of cut-in and cut-out windspeeds produce no power
should_be_zeros = test_power[(test_windspeeds < cut_in) | (test_windspeeds > cut_out)]
nptest.assert_array_equal(should_be_zeros, np.zeros(should_be_zeros.shape))
# Test all the valid windspeeds are equal
valid_power = test_power[(test_windspeeds >= cut_in) & (test_windspeeds <= cut_out)]
nptest.assert_array_equal(self.nrel_15mw_power, valid_power)
def test_logistic_5_param(self):
# Create test data using logistic5param form
curve = power_curve.logistic_5_parametric(self.x, self.y)
y_pred = curve(self.x)
# Does the logistic-5 power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=1, atol=noise * 2, err_msg="Power curve did not properly fit.")
def test_gam(self):
# Create test data using logistic5param form
curve = power_curve.gam(windspeed_column = self.x, power_column = self.y, n_splines = 20)
y_pred = curve(self.x)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def test_3paramgam(self):
# Create test data using logistic5param form
winddir = np.random.random(100)
airdens = np.random.random(100)
curve = power_curve.gam_3param(windspeed_column = self.x, winddir_column=winddir, airdens_column=airdens, power_column = self.y, n_splines = 20)
y_pred = curve(self.x, winddir, airdens)
# Does the spline-fit power curve match the test data?
nptest.assert_allclose(self.y, y_pred, rtol=0.05, atol = 20, err_msg="Power curve did not properly fit.")
def tearDown(self):
pass
class TestParametricForms(unittest.TestCase):
def setUp(self):
pass
def test_logistic5parameter(self):
y_pred = logistic5param(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
y_pred = logistic5param(np.array([1, 2, 3]), *[1300., -7., 11., 2., 0.5])
y = np.array([2.29403585, 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle integer inputs properly.")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, 7, 11, 2, 0.5)
y = np.array([ 1300.0 , 1300.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b>0).")
y_pred = logistic5param(np.array([0.01, 0.0]), 1300, -7, 11, 2, 0.5)
y = np.array([ 2.0 , 2.0 ])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not handle zero properly (b<0).")
def test_logistic5parameter_capped(self):
# Numpy array + Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=20.)
y = np.array([5., 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Numpy array + Upper and Lower Bound
y_pred = logistic5param_capped(np.array([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=10.)
y = np.array([5., 5.32662505, 10.])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Pandas Series + Upper and Lower Bound
y_pred = logistic5param_capped(pd.Series([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=20.)
y = pd.Series([5., 5.32662505, 15.74992462])
nptest.assert_allclose(y, y_pred, err_msg="Power curve did not properly fit.")
# Pandas Series + Upper and Lower Bound
y_pred = logistic5param_capped(pd.Series([1., 2., 3.]), *[1300., -7., 11., 2., 0.5], lower=5., upper=10.)
y = | pd.Series([5., 5.32662505, 10.]) | pandas.Series |
# coding: utf-8
# In[10]:
import numpy as np
import pandas as pd
from config import logger, config
from itertools import combinations
def haversine_array(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
AVG_EARTH_RADIUS = 6371 # in km
lat = lat2 - lat1
lng = lng2 - lng1
d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lng * 0.5) ** 2
h = 2 * AVG_EARTH_RADIUS * np.arcsin(np.sqrt(d))
return h
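# Hedged sanity check (the coordinates below are illustrative, not from the data set):
# the great-circle distance from Beijing (39.9N, 116.4E) to Shanghai (31.2N, 121.5E)
# should come out at roughly 1.07e3 km.
_haversine_check = haversine_array(39.9, 116.4, 31.2, 121.5)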
def dummy_manhattan_distance(lat1, lng1, lat2, lng2):
a = haversine_array(lat1, lng1, lat1, lng2)
b = haversine_array(lat1, lng1, lat2, lng1)
return a + b
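# Hedged sanity check with the same illustrative coordinates: the "manhattan"
# distance sums an east-west and a north-south haversine leg, so it should be
# at least as large as the direct haversine distance above.
_manhattan_check = dummy_manhattan_distance(39.9, 116.4, 31.2, 121.5)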
def bearing_array(lat1, lng1, lat2, lng2):
AVG_EARTH_RADIUS = 6371 # in km
lng_delta_rad = np.radians(lng2 - lng1)
lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
y = np.sin(lng_delta_rad) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)
return np.degrees(np.arctan2(y, x))
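# Hedged sanity check: heading from (0, 0) toward a point due north of it (1, 0)
# should give a bearing of approximately 0 degrees.
_bearing_check = bearing_array(0.0, 0.0, 1.0, 0.0)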
# In[11]:
print('[+] loading data')
train = pd.read_csv(config.train_query_file)
test = pd.read_csv(config.test_query_file)
plans = pd.read_csv(config.plan_file)
# In[12]:
print('[+] generating features')
train['o_lat'] = train['o'].map(lambda x: float(x.split(',')[0]))
train['o_lon'] = train['o'].map(lambda x: float(x.split(',')[1]))
train['d_lat'] = train['d'].map(lambda x: float(x.split(',')[0]))
train['d_lon'] = train['d'].map(lambda x: float(x.split(',')[1]))
test['o_lat'] = test['o'].map(lambda x: float(x.split(',')[0]))
test['o_lon'] = test['o'].map(lambda x: float(x.split(',')[1]))
test['d_lat'] = test['d'].map(lambda x: float(x.split(',')[0]))
test['d_lon'] = test['d'].map(lambda x: float(x.split(',')[1]))
feat = pd.concat((train[['sid','o_lat', 'o_lon', 'd_lat', 'd_lon']], test[['sid','o_lat', 'o_lon', 'd_lat', 'd_lon']]))
print('[+] lon lat distance features')
feat['od_haversine_dist'] = feat[['o_lat', 'o_lon', 'd_lat', 'd_lon']].apply(lambda x: haversine_array(x[0], x[1], x[2], x[3]), axis=1)
feat['od_manhattan_dist'] = feat[['o_lat', 'o_lon', 'd_lat', 'd_lon']].apply(lambda x: dummy_manhattan_distance(x[0], x[1], x[2], x[3]), axis=1)
feat['od_bearing'] = feat[['o_lat', 'o_lon', 'd_lat', 'd_lon']].apply(lambda x: bearing_array(x[0], x[1], x[2], x[3]), axis=1)
print('[+] lon lat cluster features')
coords = np.vstack((train[['o_lat', 'o_lon']].values,
train[['d_lat', 'd_lon']].values,
test[['o_lat', 'o_lon']].values,
test[['d_lat', 'd_lon']].values))
from sklearn.cluster import MiniBatchKMeans
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=50, batch_size=10000).fit(coords[sample_ind])
feat['o_coord_cluster'] = kmeans.predict(feat[['o_lat', 'o_lon']])
feat['d_coord_cluster'] = kmeans.predict(feat[['d_lat', 'd_lon']])
print('[+] lon lat PCA features')
from sklearn.decomposition import PCA
pca = PCA().fit(coords)
feat['o_coord_pca0'] = pca.transform(feat[['o_lat', 'o_lon']])[:, 0]
feat['o_coord_pca1'] = pca.transform(feat[['o_lat', 'o_lon']])[:, 1]
feat['d_coord_pca0'] = pca.transform(feat[['d_lat', 'd_lon']])[:, 0]
feat['d_coord_pca1'] = pca.transform(feat[['d_lat', 'd_lon']])[:, 1]
print('[+] agg features')
t = | pd.crosstab(index=plans['sid'], columns=plans['transport_mode'], values=plans['distance'], aggfunc=np.mean) | pandas.crosstab |
import unittest
import pandas as pd
from mmvec.heatmap import (
_parse_taxonomy_strings, _parse_heatmap_metadata_annotations,
_process_microbe_metadata, _process_metabolite_metadata,
_normalize_table)
import pandas.util.testing as pdt
class TestParseTaxonomyStrings(unittest.TestCase):
def setUp(self):
self.taxa = pd.Series([
'k__Bacteria; p__Proteobacteria; c__Deltaproteobacteria; '
'o__Desulfobacterales; f__Desulfobulbaceae; g__; s__',
'k__Bacteria; p__Cyanobacteria; c__Chloroplast; o__Streptophyta',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Lardizabala; s__biternata',
'k__Archaea; p__Euryarchaeota; c__Methanomicrobia; '
'o__Methanosarcinales; f__Methanosarcinaceae; g__Methanosarcina',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Pavlova; s__lutheri',
'k__Archaea; p__[Parvarchaeota]; c__[Parvarchaea]; o__WCHD3-30',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Sphingomonadales; f__Sphingomonadaceae'],
index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
name='Taxon')
self.exp = pd.Series(
['s__', 'o__Streptophyta', 's__biternata', 'g__Methanosarcina',
's__lutheri', 'o__WCHD3-30', 'f__Sphingomonadaceae'],
index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
name='Taxon')
def test_parse_taxonomy_strings(self):
exp = pd.Series(['p__Proteobacteria', 'p__Cyanobacteria',
'p__Proteobacteria', 'p__Euryarchaeota',
'p__Proteobacteria', 'p__[Parvarchaeota]',
'p__Proteobacteria'],
index=pd.Index([c for c in 'ABCDEFG'],
name='feature-id'), name='Taxon')
obs = _parse_taxonomy_strings(self.taxa, level=2)
pdt.assert_series_equal(exp, obs)
def test_parse_taxonomy_strings_baserank(self):
exp = pd.Series(['k__Bacteria', 'k__Bacteria', 'k__Bacteria',
'k__Archaea', 'k__Bacteria', 'k__Archaea',
'k__Bacteria'],
index=pd.Index([c for c in 'ABCDEFG'],
name='feature-id'), name='Taxon')
obs = _parse_taxonomy_strings(self.taxa, level=1)
pdt.assert_series_equal(exp, obs)
def test_parse_taxonomy_strings_toprank(self):
# expect top rank even if level is higher than depth of top rank
obs = _parse_taxonomy_strings(self.taxa, level=7)
pdt.assert_series_equal(self.exp, obs)
def test_parse_taxonomy_strings_rank_out_of_range_is_top(self):
# expect top rank even if level is higher than depth of top rank
obs = _parse_taxonomy_strings(self.taxa, level=9)
pdt.assert_series_equal(self.exp, obs)
class TestHeatmapAnnotation(unittest.TestCase):
def setUp(self):
self.taxonomy = pd.Series(
['k__Bacteria', 'k__Archaea', 'k__Bacteria', 'k__Archaea'],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
def test_parse_heatmap_metadata_annotations_colorhelix(self):
exp_cols = pd.Series(
[[0.8377187772618228, 0.7593149036488329, 0.9153517040128891],
[0.2539759281991313, 0.3490084835469758, 0.14482988411775732],
[0.8377187772618228, 0.7593149036488329, 0.9153517040128891],
[0.2539759281991313, 0.3490084835469758, 0.14482988411775732]],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
exp_classes = {'k__Archaea': [0.2539759281991313, 0.3490084835469758,
0.14482988411775732],
'k__Bacteria': [0.8377187772618228, 0.7593149036488329,
0.9153517040128891]}
cols, classes = _parse_heatmap_metadata_annotations(
self.taxonomy, 'colorhelix')
pdt.assert_series_equal(exp_cols, cols)
self.assertDictEqual(exp_classes, classes)
def test_parse_heatmap_metadata_annotations_magma(self):
exp_cols = pd.Series(
[(0.944006, 0.377643, 0.365136), (0.445163, 0.122724, 0.506901),
(0.944006, 0.377643, 0.365136), (0.445163, 0.122724, 0.506901)],
index=pd.Index([c for c in 'ABCD'], name='id'), name='Taxon')
exp_classes = {'k__Archaea': (0.445163, 0.122724, 0.506901),
'k__Bacteria': (0.944006, 0.377643, 0.365136)}
cols, classes = _parse_heatmap_metadata_annotations(
self.taxonomy, 'magma')
pdt.assert_series_equal(exp_cols, cols)
self.assertDictEqual(exp_classes, classes)
class TestMetadataProcessing(unittest.TestCase):
def setUp(self):
self.taxonomy = pd.Series(
['k__Bacteria', 'k__Archaea', 'k__Bacteria'],
index=pd.Index([c for c in 'ABC']), name='Taxon')
self.metabolites = pd.Series([
'amino acid', 'carbohydrate', 'drug metabolism'],
index=pd.Index(['a', 'b', 'c']), name='Super Pathway')
self.ranks = pd.DataFrame(
[[4, 1, 2, 3], [1, 2, 1, 2], [2, 4, 3, 1], [6, 4, 2, 3]],
index=pd.Index([c for c in 'ABCD']), columns=[c for c in 'abcd'])
# test that metadata processing works, filters ranks, and works in sequence
def test_process_metadata(self):
# filter on taxonomy, taxonomy parser/annotation tested above
with self.assertWarnsRegex(UserWarning, "microbe IDs are present"):
res = _process_microbe_metadata(
self.ranks, self.taxonomy, -1, 'magma')
ranks_filtered = pd.DataFrame(
[[4, 1, 2, 3], [1, 2, 1, 2], [2, 4, 3, 1]],
index=pd.Index([c for c in 'ABC']), columns=[c for c in 'abcd'])
| pdt.assert_frame_equal(ranks_filtered, res[1]) | pandas.util.testing.assert_frame_equal |
import pandas as pd
from xlsx2csv import Xlsx2csv
import sys
daytoprocess = sys.argv[1]
print("Convert XLSX to CSV")
Xlsx2csv("../data/reports/xlsx/"+daytoprocess+".xlsx", outputencoding="utf-8").convert("../data/reports/csv/"+daytoprocess+".csv")
print("Load csv")
df = pd.read_csv("../data/reports/csv/"+daytoprocess+".csv", header=3)
print("Drop useless column")
df = df.drop(columns={'Libellé A21'})
dep = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '21', '22', '23', '24', '25', '26', '27', '28', '29', '2A', '2B', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '971', '972', '973', '974', '976', 'ND', 'total']
print("Shape must equal 24: "+str(df.shape[0]))
print("Concat in one df")
df = df.iloc[:, :-2]
i = 0
j = 0
k = 0
for column in df:
if((i != 0) & (k < len(dep))):
if(j % 2 == 1):
df = df.rename(columns={column: "montant_"+dep[k]})
j = 0
k = k + 1
else:
df = df.rename(columns={column: "nombre_"+dep[k]})
j = j + 1
i = i + 1
df = df[:24]
print("Generate one simple df")
df2 = df[['A21',"nombre_"+dep[0],"montant_"+dep[0]]]
df2['code_departement'] = dep[0]
df2 = df2.rename(columns={"nombre_"+dep[0]: 'nombre',"montant_"+dep[0]:'montant'})
i = 0
for i in range(len(dep)):
if(i != 0):
dfinter = df[['A21',"nombre_"+dep[i],"montant_"+dep[i]]]
dfinter['code_departement'] = dep[i]
dfinter = dfinter.rename(columns={"nombre_"+dep[i]: 'nombre',"montant_"+dep[i]:'montant'})
df2 = df2.append(dfinter)
df3 = df2
df3['ND'] = df3['nombre'].apply(lambda x: True if x == 'ND' else False)
df3['nombre'] = df3['nombre'].apply(lambda x: x if x != 'ND' else 0)
df3['montant'] = df3['montant'].apply(lambda x: x if x != 'ND' else 0)
df3['nombre'] = df3['nombre'].astype(float)
df3['montant'] = df3['montant'].astype(float)
dftotal = df3[df3['A21'] == 'TOTAL']
dfdep = df3[df3['A21'] != 'TOTAL']
print("Put ND values in Z section")
for i in range(len(dep)):
dfinter = dfdep[dfdep['code_departement'] == dep[i]][['montant','code_departement']]
somme = dfinter.groupby(['code_departement']).sum()['montant'].iloc[0]
total = dftotal[dftotal['code_departement'] == dep[i]][['montant']]['montant'].iloc[0]
delta = total - somme
dfdep.loc[((dfdep.code_departement == dep[i]) & (dfdep['A21'] == 'Z')), 'montant'] = delta + dfdep[((dfdep.code_departement == dep[i]) & (dfdep['A21'] == 'Z'))]['montant'].iloc[0]
dfinter2 = dfdep[dfdep['code_departement'] == dep[i]][['nombre','code_departement']]
somme2 = dfinter2.groupby(['code_departement']).sum()['nombre'].iloc[0]
total2 = dftotal[dftotal['code_departement'] == dep[i]][['nombre']]['nombre'].iloc[0]
delta2 = total2 - somme2
dfdep.loc[((dfdep.code_departement == dep[i]) & (dfdep['A21'] == 'Z')), 'nombre'] = delta2 + dfdep[((dfdep.code_departement == dep[i]) & (dfdep['A21'] == 'Z'))]['nombre'].iloc[0]
dfdep['code_departement'] = dfdep['code_departement'].apply(lambda x: None if x == 'ND' else x)
print("Exclude total info")
dffinal = dfdep[dfdep['code_departement'] != 'total']
print("Get Region in dataframe")
dep = pd.read_csv("../utils/departement2019.csv",dtype={'dep':str,'reg':str})
dep = dep[['dep','reg']]
dep = dep.rename(columns={'dep':'code_departement'})
dffinal2 = | pd.merge(dffinal, dep, on='code_departement',how='left') | pandas.merge |
'''
Created on 13-Jul-2018
@author: <NAME>
'''
# We will import data set from sklearn
from sklearn.datasets import load_boston
# Load data set
data = load_boston()
# Print dictionary keys
print(data.keys())
'''
dict_keys(['data', 'target', 'DESCR', 'feature_names'])
'''
# Print feature description
#print(data.DESCR)
# import Pandas for data frame operations
import pandas as pd
# Create Pandas data frame using dictionary fields
# data contains data and feature_names will be columns
data_frame = | pd.DataFrame(data.data,columns=data.feature_names) | pandas.DataFrame |
import pandas as pd
import string
import numpy as np
from collections import Counter
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import itertools
import math
#nltk.download('averaged_perceptron_tagger')
#nltk.download('wordnet') #uncomment if wordnet is not already downloaded
lemmatiser = WordNetLemmatizer()
#function for creating lemmas/ types
def make_singular(response): #lemmatize nouns. If the repsonse has multiple words, lemmatize just the last word
response = response.split(' ')
singular = lemmatiser.lemmatize(response[-1], pos="n")
if len(response)==1:
return singular
else:
return ' '.join((' '.join(response[0:-1]),singular))
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return ''
def lemmatize_pos(response): #lemmatize responses
response = response.split(' ')
pos_tags = nltk.pos_tag(response)
lemma_list=[]
for i in range(len(response)):
wordnet_pos=get_wordnet_pos(pos_tags[i][1])
if wordnet_pos == '':
lemma_list.append(lemmatiser.lemmatize(response[i]))
else:
lemma_list.append(lemmatiser.lemmatize(response[i], pos=wordnet_pos))
return lemma_list
#define function for computing cosine similarity based on counter
def counter_cosine_similarity(c1, c2):
terms = set(c1).union(c2)
dotprod = sum(c1.get(k, 0) * c2.get(k, 0) for k in terms)
magA = math.sqrt(sum(c1.get(k, 0)**2 for k in terms))
magB = math.sqrt(sum(c2.get(k, 0)**2 for k in terms))
return dotprod / (magA * magB)
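# Hedged usage sketch with made-up word counts: two bags of words that share most
# of their mass should score close to 1; these two score exactly 0.8.
_example_sim = counter_cosine_similarity(Counter(["dog", "dog", "cat"]), Counter(["dog", "cat", "cat"]))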
#read in data set
d = pd.read_csv("../data/vcs_naming_final_data_cleaned.csv")
#d = pd.read_csv("test.csv") #test data set
#check out the data (first 10 rows)
d.loc[range(10),]
#add column that specifies angle
d.loc[:,'angle']=[item.replace("VCS_","").replace(".png","") for item in d["image"]]
#array of unique category names
categoryNames=np.unique(d["angle"])
pairList=list(itertools.combinations(categoryNames,2))
item_list=[]
word_cosine_sim_list=[]
lemma_cosine_sim_list=[]
response_cosine_sim_list=[]
distance_list=[]
for (category1,category2) in pairList:
print (category1,category2)
#set up list of responses for each category
wordListResponse_1=[]
lemmaListResponse_1=[]
responseLengthList_1=[]
completeWordList_1=[]
completeLemmaList_1=[]
responseList_1=[]
#loop through each response for that category
for response in d.loc[d["angle"]==category1,"naming_response"]:
#break response into a list of unique words while stripping punctuation
#look up list comprehension in python to try to break down what's going on here
#I first remove any punctuation/ unusual characters from the response (except apostrophes)
chars_to_remove = '!"#$%&\()*+,-./:;<=>?@[\\]^_`{|}~' #string.punctuation potentially messes with contractions
        response_punctuation_cleaned = " ".join([y.translate(str.maketrans("", "", chars_to_remove)) for y in str(response).lower().split(" ") if (y != "") & (y != ".")])
#now tokenize
curWordList = nltk.word_tokenize(response_punctuation_cleaned) #tokenize
curLemmaList=[lemmatize_pos(x) for x in curWordList]
#flatten list
curLemmaList=[y for x in curLemmaList for y in x]
#add to list of word response lists
wordListResponse_1.append(curWordList)
lemmaListResponse_1.append(curLemmaList)
#add to list tracking the number of words in each response
responseLengthList_1.append(len(curWordList))
#list of all individual word responses
completeWordList_1 = completeWordList_1 + curWordList
completeLemmaList_1 = completeLemmaList_1 + curLemmaList
responseList_1.append(".".join(curWordList))
#set up list of responses for each category
wordListResponse_2=[]
lemmaListResponse_2=[]
responseLengthList_2=[]
completeWordList_2=[]
completeLemmaList_2=[]
responseList_2=[]
#loop through each response for that category
for response in d.loc[d["angle"]==category2,"naming_response"]:
#break response into a list of unique words while stripping punctuation
#look up list comprehension in python to try to break down what's going on here
#I first remove any punctuation/ unusual characters from the response (except apostrophes)
chars_to_remove = '!"#$%&\()*+,-./:;<=>?@[\\]^_`{|}~' #string.punctuation potentially messes with contractions
        response_punctuation_cleaned = " ".join([y.translate(str.maketrans("", "", chars_to_remove)) for y in str(response).lower().split(" ") if (y != "") & (y != ".")])
#now tokenize
curWordList = nltk.word_tokenize(response_punctuation_cleaned) #tokenize
curLemmaList=[lemmatize_pos(x) for x in curWordList]
#flatten list
curLemmaList=[y for x in curLemmaList for y in x]
#add to list of word response lists
wordListResponse_2.append(curWordList)
lemmaListResponse_2.append(curLemmaList)
#add to list tracking the number of words in each response
responseLengthList_2.append(len(curWordList))
#list of all individual word responses
completeWordList_2 = completeWordList_2 + curWordList
completeLemmaList_2 = completeLemmaList_2 + curLemmaList
responseList_2.append(".".join(curWordList))
#cosine similarity computations
word_cosine_sim=counter_cosine_similarity(Counter(completeWordList_1), Counter(completeWordList_2))
lemma_cosine_sim=counter_cosine_similarity(Counter(completeLemmaList_1), Counter(completeLemmaList_2))
response_cosine_sim=counter_cosine_similarity(Counter(responseList_1), Counter(responseList_2))
non_integer_set = ["triangle","dog","square"]
if (category1 in non_integer_set) or (category2 in non_integer_set):
item_list.append(category1+"_"+category2)
distance_list.append("NA")
elif int(category1)>int(category2):
item_list.append(category2+"_"+category1)
distance_list.append(min(int(category1)-int(category2),360-(int(category1)-int(category2))))
else:
item_list.append(category1+"_"+category2)
distance_list.append(min(int(category2)-int(category1),360-(int(category2)-int(category1))))
word_cosine_sim_list.append(word_cosine_sim)
lemma_cosine_sim_list.append(lemma_cosine_sim)
response_cosine_sim_list.append(response_cosine_sim)
#put everything in a data frame
df = | pd.DataFrame({'image_pair': item_list,'distance': distance_list, 'word_cosine_sim': word_cosine_sim_list, 'lemma_cosine_sim': lemma_cosine_sim_list, 'response_cosine_sim': response_cosine_sim_list}) | pandas.DataFrame |
import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
from src.conf import CORRELATIONS_DAYS, BATCH_LEN, STOCKS_TO_WATCH, \
LABEL_TO_PREDICT, CORR_ANALYSIS_PKL
from src.visualization.matplot_graphs import plot_stock
from src.data_functions.data_load import get_dataframe
def detaCorrelation(df, stocksName):
    """Plot the correlation matrix of the given dataframe as a heatmap.

    Do not run this function with a large dataframe; it would take forever.
    """
df_corr = df.corr()
data = df_corr.values
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
heatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)
fig.colorbar(heatmap)
ax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
ax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
column_labels = df_corr.columns
row_labels = df_corr.index
ax.set_xticklabels(column_labels)
ax.set_yticklabels(row_labels)
plt.xticks(rotation=90)
heatmap.set_clim(-1, 1)
plt.tight_layout()
plt.show()
class correlations():
def __init__(self, best_match, top_samples=2, set_len=BATCH_LEN):
        # plus 2, because two rows are lost in the correlation process (shift and pct_change)
self.set_len = set_len+2
self.best_match = best_match
self.n_samples = top_samples
self.corr_stocks = None
self.pred = None
def get_correlation(self, df_in, stock_to_predict):
# roll the desired dataset
df = df_in.copy()
stock_vector = df[stock_to_predict].shift(-CORRELATIONS_DAYS)
# filter correct moves
if self.best_match:
self.pred, tmp_stock0 = df[-self.set_len:].pct_change()[1:], stock_vector[-self.set_len:-1].pct_change()[1:]
# drop any invalid column
self.pred.dropna(inplace=True, axis=1)
self.pred[self.pred < 0] = -1
self.pred[self.pred > 0] = 1
tmp_stock0[tmp_stock0 < 0] = -1
tmp_stock0[tmp_stock0 > 0] = 1
# subtract and absolute value
sub = self.pred[:-1].sub(tmp_stock0, axis=0)
sub[sub < 0] = sub[sub < 0]*-1
top_match = sub.sum(axis=0)
top_match = top_match.sort_values(ascending=True)
min_matches = int(top_match[self.n_samples-1])
top = top_match[top_match <= min_matches]
dataset = df[top.index]
n_best_corr = len(top.index)
# print('ground truth:\n', tmp_stock0)
# print(len(top.index), ' best match:\n', self.pred[top.index])
else:
dataset = df
# make data correlation
tmp_df, tmp_stock = dataset[-self.set_len:-1].pct_change()[1:], stock_vector[-self.set_len:-1].pct_change()[1:]
correlation = tmp_df.corrwith(tmp_stock, axis=0)
# correlation[top.index[0]] += 1
top3 = correlation.sort_values(ascending=False)[:self.n_samples]
# top3[0] -= 1
# filter worst correlation
if self.best_match:
top_match = top_match.sort_values(ascending=False)
min_matches = int(top_match[self.n_samples-1])
top = top_match[top_match >= min_matches]
n_inv_best_corr = len(top.index)
# print(len(top.index), ' inverse match:\n', self.pred[top.index])
dataset = df[top.index]
tmp_df = dataset[-self.set_len:-1].pct_change()[1:]
tmp_df.dropna(inplace=True, axis=1)
correlation = tmp_df.corrwith(tmp_stock, axis=0)
# correlation[top.index[0]] -= 1
bottom3 = correlation.sort_values(ascending=True)[:self.n_samples]
# bottom3[0] += 1
self.corr_stocks = | pd.concat((top3, bottom3)) | pandas.concat |
from pyjamas_core import Supermodel
from pyjamas_core.util import Input, Output, Property
from datetime import datetime, timedelta
from Models._utils.time import datetime2utc_time, utc_time2datetime
import numpy as np
from pytz import timezone
import json
from scipy.interpolate import griddata
import pandas as pd
import os
# define the model class and inherit from class "Supermodel"
class Model(Supermodel):
# model constructor
def __init__(self, id, name: str):
# instantiate supermodel
super(Model, self).__init__(id, name)
# define inputs
        self.inputs['mode'] = Input(name='modus', unit='-', info="mode (is live or not)")
self.inputs['KW'] = Input(name='KW info', unit='-', info="KW informations (u.a. id, lat, lon)")
self.inputs['date'] = Input(name='Futures', unit='s', info="Time vector of futures in utc timestamp [s]")
# define outputs
self.outputs['KW_weather'] = Output(name='weather data of KWs', unit='date, °C, m/s, W/m^2', info='weather data of KWs')
self.outputs['Futures_weather'] = Output(name='weather data', unit='date, °C, m/s, W/m^2', info='(future) weather data (temperature, wind speed, radiation)')
# define properties
self.properties['T_offset'] = Property(default=0., data_type=float, name='temperature offset', unit='%', info="offset of temperature in %")
self.properties['u_offset'] = Property(default=0., data_type=float, name='wind speed offset', unit='%', info="offset of wind speed in %")
self.properties['P_offset'] = Property(default=0., data_type=float, name='radiation offset', unit='%', info="offset of radiation in %")
self.properties['ref_year'] = Property(default=2007, data_type=int, name='reference year', unit='-', info="reference year for modeled weather")
# define persistent variables
self.data_hist = None
self.data_hist_year = None
self.ref_year = None
async def func_birth(self):
# read historic data from file
self.data_hist = self.historic_data_read()
async def func_amend(self, keys=[]):
# if the refence year changes, select new historic reference data based on ref_year
if 'ref_year' in keys:
self.ref_year = self.get_property('ref_year')
self.data_hist_year = self.historic_select_year()
async def func_peri(self, prep_to_peri=None):
# get inputs
        islive = await self.get_input('mode')
KW_data_orig = await self.get_input('KW')
KW_data = {k: KW_data_orig[k] for k in ('id', 'kw_bezeichnung', 'latitude', 'longitude')}
futures = await self.get_input('date')
# prepare weather data, dependent on modus live or not
islive = False
if islive:
# live: take current weather data forecast by API
weather_data = self.prepare_API_weather()
else:
# not live: take historic weather data from a reference year
weather_data = self.prepare_historic_weather(futures)
# KW weather
# interpolate weather data in times and locations for the different KW's
KW_weather_data = self.KW_weather_data(KW_data, weather_data, futures)
# futures weather
# editing weather data for further use (e.g. power demand model)
futures_weather_data = weather_data.tolist() #self.future_weather_data(futures, weather_data)
# set output
self.set_output("KW_weather", KW_weather_data)
self.set_output("Futures_weather", futures_weather_data)
@staticmethod
def historic_data_read():
# read historic weather data
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
filename = os.path.join(dir_path, 'confidential', 'dict_hist')
with open(filename, 'r') as f:
data_hist = json.load(f)
return data_hist
def historic_select_year(self):
# copy all historical data to new dict
data = dict((k, v) for k, v in self.data_hist.items())
# define start and date of the reference year
start_date = datetime(self.ref_year, 1, 1, 0, 0)
start_date = datetime2utc_time(start_date)
end_date = datetime(self.ref_year+1, 1, 1, 0, 0)
end_date = datetime2utc_time(end_date)
# extract time, temperature, wind speed and radiation from dict
time = np.array(data["times"])
time = time[np.newaxis, :]
temp = np.array(data["temperature"]["values"])
wind = np.array(data["windspeed"]["values"])
rad = np.array(data["radiation"]["values"])
# create numpy array of time, temperature, wind speed and radiation
# and select the ones within the reference year
matrix = np.append(time, temp, axis=0)
matrix = np.append(matrix, wind, axis=0)
matrix = np.append(matrix, rad, axis=0)
matrix = matrix.transpose()
matrix = matrix[(matrix[:, 0] >= start_date) & (matrix[:, 0] <= end_date)]
matrix = matrix.transpose()
# write selected data back to dict
data["times"] = matrix[0, :].tolist()
data["temperature"]["values"] = matrix[1:26, :].tolist()
data["windspeed"]["values"] = matrix[26:51, :].tolist()
data["radiation"]["values"] = matrix[51:76, :].tolist()
return data
def prepare_API_weather(self):
# not working so far
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
filename = os.path.join(dir_path, 'confidential', 'API_Key')
with open(filename, "r") as f:
API_key = f.readline()
url_ad = API_key
#weather_API_read = requests.get(url_ad).json()
#with open('confidential/dict_API', 'w') as fp:
# json.dump(weather_API_read, fp)
def prepare_historic_weather(self, futures):
# set futures back to ref_year
futures_shifted = self.dates_shift(futures)
# filter historic weather data
# - around the shifted futures
data_filtered = self.data_filter(futures_shifted)
# create data base of historic weather data
# - formatting filtered data from dict to numpy array
data_base = self.create_database(data_filtered)
# forecast weather data (shift of historic weather data)
# not implemented so far
#forecast_data = self.datahist_shift(data_base, futures[0])
return data_base
def dates_shift(self, dates):
# shift dates (futures) back to reference year
dates = [utc_time2datetime(x) for x in dates]
date_1 = dates[0]
date_1_ref = date_1.replace(year=self.ref_year)
date_shift = date_1-date_1_ref
dates_shifted = [x-date_shift for x in dates]
year_1 = dates_shifted[0].year
dates_shifted = [x.replace(year=year_1) for x in dates_shifted]
dates_shifted = [datetime2utc_time(x) for x in dates_shifted]
return dates_shifted
def data_filter(self, dates):
# create dict copy of historical reference year weather data
data = {k: v for k, v in self.data_hist_year.items()}
# extract the futures
ref_times = data['times']
ref_times = np.array(ref_times)
# first and last date of futures
date_first = dates[0]
date_last = dates[len(dates)-1]
# extract limit reference times around futures
# - last reference time before first future
# - first reference time after last future
date_before_first = np.max(ref_times[ref_times <= date_first])
date_after_last = np.min(ref_times[ref_times >= date_last])
# extract futures, temperature, wind speed and radiation from data dict
time = np.array(data["times"])
time = time[np.newaxis, :]
temp = np.array(data["temperature"]["values"])
wind = np.array(data["windspeed"]["values"])
rad = np.array(data["radiation"]["values"])
# create data matrix with futures, temperature, wind speed and radiation
matrix = np.append(time, temp, axis=0)
matrix = np.append(matrix, wind, axis=0)
matrix = np.append(matrix, rad, axis=0)
matrix = matrix.transpose()
# filter reference weather data within limit reference times
# - all futures within a year
if date_first < date_last:
matrix = matrix[(matrix[:, 0] >= date_before_first) & (matrix[:, 0] <= date_after_last)]
# - futures with turn of the year
else:
matrix = matrix[(matrix[:, 0] <= date_after_last) | (matrix[:, 0] >= date_before_first)]
matrix = matrix.transpose()
# update dict
data2 = {"ids": data["ids"], "lat": data["lat"], "lon": data["lon"], "asl": data["asl"],
"times": matrix[0, :].tolist(),
"temperature": {'height': data["temperature"]['height'],
'unit': data["temperature"]['unit'],
"values": matrix[1:26, :].tolist()},
"windspeed": {'height': data["windspeed"]['height'],
'unit': data["windspeed"]['unit'],
"values": matrix[26:51, :].tolist()},
"radiation": {'height': data["radiation"]['height'],
'unit': data["radiation"]['unit'],
"values": matrix[51:76, :].tolist()}
}
return data2
def create_database(self, data_filtered):
# extract number of locations (lat/lon) and number of futures
num_points = data_filtered["lat"].__len__()
num_times = data_filtered["times"].__len__()
# initialize latitude, longitude, time, temperature, wind speed and radiation vectors
# and fill them by extraction of dict
lat_vec = []
lon_vec = []
time_vec = np.tile(np.array(data_filtered["times"]), num_points)
temp_vec = []
wind_vec = []
rad_vec = []
for it in range(0, num_points):
lat_vec.append(np.repeat(data_filtered["lat"][it], num_times))
lon_vec.append(np.repeat(data_filtered["lon"][it], num_times))
temp_vec.append(data_filtered["temperature"]["values"][it])
wind_vec.append(data_filtered["windspeed"]["values"][it])
rad_vec.append(data_filtered["radiation"]["values"][it])
# change format to array and transposing
lat_vec = np.array([lat_vec]).ravel()
lon_vec = np.array(lon_vec).ravel()
time_vec = np.array(time_vec).ravel()
temp_vec = np.array(temp_vec).ravel()
wind_vec = np.array(wind_vec).ravel()
rad_vec = np.array(rad_vec).ravel()
lat_vec = lat_vec[np.newaxis, :].transpose()
lon_vec = lon_vec[np.newaxis, :].transpose()
time_vec = time_vec[np.newaxis, :].transpose()
temp_vec = temp_vec[np.newaxis, :].transpose()
wind_vec = wind_vec[np.newaxis, :].transpose()
rad_vec = rad_vec[np.newaxis, :].transpose()
# offset for temperature, wind speed and radiation
temp_vec = np.multiply(temp_vec, (1 + self.get_property('T_offset') / 100))
wind_vec = np.multiply(wind_vec, (1 + self.get_property('u_offset') / 100))
rad_vec = np.multiply(rad_vec, (1 + self.get_property('P_offset') / 100))
# create matrix
data_base = np.concatenate((lat_vec, lon_vec, time_vec, temp_vec, wind_vec, rad_vec), axis=1)
return data_base
def KW_weather_data(self, KW_data, weather_data, futures):
# naming of columns
# - of KW_data (ones to be extracted)
KW_data_columns = ['id', 'kw_bezeichnung', 'latitude', 'longitude']
# shift futures back (to agree with historic data
futures = self.dates_shift(futures)
# create data frame from KW_data dict
KW_data_df = pd.DataFrame(KW_data)
# select only photovoltaic and wind turbine data
PV_data = KW_data_df.loc[KW_data_df[KW_data_columns[1]] == 'Photovoltaik']
WT_data = KW_data_df.loc[KW_data_df[KW_data_columns[1]] == 'Windturbine']
# create data frame from weather data base (array)
weather_df = pd.DataFrame(data=weather_data, columns=['lat', 'lon', 'time', 'temperature', 'windspeed', 'radiation'])
# select relevant columns for photovoltaics and wind turbines
weather_PV = weather_df[['lat', 'lon', 'time', 'radiation']]
weather_WT = weather_df[['lat', 'lon', 'time', 'windspeed']]
# 2D interpolation over KW locations (latitude/longitude)
time_vec = weather_df['time'].unique()
PV_weather_2D = | pd.DataFrame() | pandas.DataFrame |
import random
import pandas as pd
import pytest
from evalml.preprocessing.data_splitters import BalancedClassificationSampler
@pytest.mark.parametrize("ratio,samples,percentage,seed",
[(1, 1, 0.2, 1),
(3.3, 101, 0.5, 100)])
def test_balanced_classification_init(ratio, samples, percentage, seed):
bcs = BalancedClassificationSampler(balanced_ratio=ratio, min_samples=samples, min_percentage=percentage, random_seed=seed)
assert bcs.balanced_ratio == ratio
assert bcs.min_samples == samples
assert bcs.min_percentage == percentage
assert bcs.random_seed == seed
def test_balanced_classification_errors():
with pytest.raises(ValueError, match="balanced_ratio must be"):
BalancedClassificationSampler(balanced_ratio=-1)
with pytest.raises(ValueError, match="min_sample must be"):
BalancedClassificationSampler(min_samples=0)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=0)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=0.6)
with pytest.raises(ValueError, match="min_percentage must be"):
BalancedClassificationSampler(min_percentage=-1.3)
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_balanced_simple(num_classes):
X = pd.DataFrame({"a": [i for i in range(1000)]})
y = pd.Series([i % num_classes for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
def test_classification_severely_imbalanced_binary_simple():
X = pd.DataFrame({"a": [i for i in range(1000)]})
# 5 instances of positive 1
y = pd.Series([1 if i % 200 != 0 else 0 for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
def test_classification_severely_imbalanced_multiclass_simple():
X = pd.DataFrame({"a": [i for i in range(1000)]})
# 9 instances of 1, 9 instances of 2
y = pd.Series([0 if i % 55 != 0 else (1 + i % 2) for i in range(1000)])
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4, 5, 10])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_balanced_ratio(num_classes, balanced_ratio):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 750 + [1] * 250)
else:
y = pd.Series([0] * 600 + [1] * 200 + [2] * 200)
bcs = BalancedClassificationSampler(balanced_ratio=balanced_ratio)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if balanced_ratio >= 3:
# the classes are considered balanced, do nothing
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
else:
# remove some samples
assert len(X2) == {2: (250 * (balanced_ratio + 1)), 3: (200 * (balanced_ratio + 2))}[num_classes]
assert len(y2) == len(X2)
assert y2.value_counts().values[0] == balanced_ratio * {2: 250, 3: 200}[num_classes]
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_samples(num_classes, min_samples):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 900 + [1] * 100)
else:
y = pd.Series([0] * 799 + [1] * 101 + [2] * 100)
bcs = BalancedClassificationSampler(balanced_ratio=1, min_samples=min_samples)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if min_samples <= 100:
# balance 1:1 without conflicting with min_samples
assert len(X2) == {2: 200, 3: 300}[num_classes]
assert y2.value_counts().values[0] == 100
else:
# cannot balance 1:1, choosing the min_samples size for the majority class and add minority class(es)
if num_classes == 2:
assert len(X2) == min_samples + 100
assert y2.value_counts().values[0] == min_samples
else:
assert len(X2) == min_samples + 201
assert y2.value_counts().values[0] == min_samples
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_percentage(num_classes, min_percentage):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if num_classes == 2:
y = pd.Series([0] * 950 + [1] * 50)
else:
y = pd.Series([0] * 820 + [1] * 90 + [2] * 90)
bcs = BalancedClassificationSampler(balanced_ratio=1, min_percentage=min_percentage)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if min_percentage <= 0.05:
# does not classify as severe imbalance, so balance 1:1 with min_samples==100
assert len(X2) == {2: 150, 3: 280}[num_classes]
assert y2.value_counts().values[0] == 100
else:
# severe imbalance, do nothing
pd.testing.assert_frame_equal(X2, X)
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_severe_imbalance_binary(min_samples, min_percentage):
X = pd.DataFrame({"a": [i for i in range(1000)]})
y = pd.Series([0] * 850 + [1] * 150) # minority class is 15% of total distribution
bcs = BalancedClassificationSampler(balanced_ratio=2, min_samples=min_samples, min_percentage=min_percentage)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if min_samples >= 200 and min_percentage >= 0.2:
# severe imbalance, do nothing
pd.testing.assert_frame_equal(X2, X)
else:
# does not classify as severe imbalance, so balance 2:1 with min_samples
assert len(X2) == 150 + max(min_samples, 2 * 150)
assert y2.value_counts().values[0] == max(min_samples, 2 * 150)
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4.5, 5, 6, 10])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_normal_imbalance_binary(min_samples, balanced_ratio):
X = pd.DataFrame({"a": [i for i in range(1000)]})
y = pd.Series([0] * 850 + [1] * 150) # minority class is 15% of total distribution, never counts as severe imbalance
bcs = BalancedClassificationSampler(balanced_ratio=balanced_ratio, min_samples=min_samples)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
if balanced_ratio >= 6:
# data is balanced, do nothing
pd.testing.assert_frame_equal(X2, X)
else:
# rebalance according to the ratio and min_samples
assert len(X2) == 150 + max(min_samples, int(balanced_ratio * 150))
assert y2.value_counts().values[0] == max(min_samples, int(balanced_ratio * 150))
@pytest.mark.parametrize("data_type", ['n', 's'])
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_severe_imbalance_multiclass(data_type, min_samples, min_percentage):
X = pd.DataFrame({"a": [i for i in range(1000)]})
if data_type == 'n':
y = | pd.Series([0] * 800 + [1] * 100 + [2] * 100) | pandas.Series |
# imports
# base
import operator
import json
from collections import Counter
# third party
import pandas as pd
import dash
import plotly.figure_factory as ff
# project
from .layout import retrieve_layout
# NON-CALLBACK FUNCTIONS REQUIRED FOR THE APP
# -----------------------------------------------------------------------------
class biter(object):
"""A bidirectional iterator which ensures that the index doesn't
overflow in either direction. Used for generating the next
cluster on the list.
Parameters
----------
    collection : sequence
        a sequence of items (e.g. cluster ids) to iterate through
Attributes
----------
index : int
keeps track of the place in the collection
collection
"""
def __init__(self, collection=[]):
self.collection = collection
self.index = -1
def next(self):
"""Return the next object in the collection.
Returns
-------
next object in the collection
"""
try:
self.index += 1
return self.collection[self.index]
except IndexError:
self.index = len(self.collection) - 1
return self.collection[self.index]
def prev(self):
"""return previous object in the collection
Returns
-------
previous object in the collection
"""
# prevent negative outcomes
if self.index != 0:
self.index -= 1
return self.collection[self.index]
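# Hedged usage sketch of the bidirectional iterator above (the cluster names are made up).
_cluster_iter = biter(['cluster_0', 'cluster_1', 'cluster_2'])
_first = _cluster_iter.next()   # -> 'cluster_0'
_second = _cluster_iter.next()  # -> 'cluster_1'
_back = _cluster_iter.prev()    # -> 'cluster_0' (the index never goes below zero)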
def config_app():
"""
Configure the APP in the required manner.
Enables local css and js as well as sets page title to Optimus.
Returns
-------
dash_app
A configured dash app
"""
# DASH CONFIGSs
app = dash.Dash()
# settings to serve css locally
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
app.title = 'Optimus'
return app
def which_button(btn_dict):
"""
Assesses which button was pressed given the time
each button was pressed at. It finds the latest
pressed button and returns the key for it.
Parameters
----------
btn_dict : dict
an input dict in the form of {'button name': float(n_clicks_timestamp)}
Returns
-------
dict_key
Returns whatever the dict keys are for the key that was pressed latest
"""
return max(btn_dict.items(), key=operator.itemgetter(1))[0]
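# Hedged usage sketch: with the made-up n_clicks_timestamp values below, the most
# recently pressed button is 'next', so its key is returned.
_pressed = which_button({'prev': 1518000000.0, 'next': 1518000042.0})  # -> 'next'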
def preprocess(config):
"""
A function which uses the config provided to
create the output files from the original data.
On top of this it tweaks it slightly.
Parameters
----------
config : dict
dictionary containing the config. just needs the paths
to input and output mainly.
Returns
-------
(list, pd.DataFrame.columns)
return a list of clusters to keep based on
the number of items in it as well as the columns in the
dataframe.
"""
# read the base data
df = | pd.read_csv(config['data'], encoding=config['encoding']) | pandas.read_csv |
import glob
from Bio import SeqIO
import random
import pandas as pd
def slidingWindow(sequence, winSize, step=1):
"""Returns a generator that will iterate through
the defined chunks of input sequence. Input sequence
must be iterable."""
# Verify the inputs
try:
it = iter(sequence)
except TypeError:
raise Exception("**ERROR** sequence must be iterable.")
if not ((type(winSize) == type(0)) and (type(step) == type(0))):
raise Exception("**ERROR** type(winSize) and type(step) must be int.")
if step > winSize:
raise Exception("**ERROR** step must not be larger than winSize.")
if winSize > len(sequence):
raise Exception("**ERROR** winSize must not be larger than sequence length.")
# Pre-compute number of chunks to emit
numOfChunks = ((len(sequence) - winSize) / step) + 1
# Do the work
for i in range(0, int(numOfChunks) * step, step):
yield sequence[i:i + winSize]
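# Hedged usage sketch: an 8-character sequence with a window of 4 and a step of 2
# yields three chunks: 'ACGT', 'GTAC', 'ACGT'.
_example_chunks = list(slidingWindow("ACGTACGT", winSize=4, step=2))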
def positive(filepath):
rlist1 = []
fastq_sequences = SeqIO.parse(open(filepath), 'fastq')
for fastq in fastq_sequences:
sequence = str(fastq.seq)
rlist1.append(sequence)
return rlist1
sequence=[]
label=[]
# insert your fastq to test here
positive_list = positive("NCFB_D_MG000106_2020_R1.fastq")
for x in range(len(positive_list)):
to_insert = positive_list[x]
#to_insert = random.choice(positive_list)
# if you want to draw random lines from the file and test them change to_insert to the above line
sequence.append(to_insert)
if x == 1:
label.append("insert")
else:
label.append("no_insert")
insert_df = | pd.DataFrame(sequence,label,columns=["sequence"]) | pandas.DataFrame |
import argparse
import asyncio
import json
import logging
import zipfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import annofabapi
import pandas
from annofabapi.parser import SimpleAnnotationZipParser
import annofabcli
import annofabcli.common.cli
from annofabcli import AnnofabApiFacade
from annofabcli.common.cli import AbstractCommandLineInterface, ArgumentParser, build_annofabapi_resource_and_login
from annofabcli.common.download import DownloadingFile
logger = logging.getLogger(__name__)
def _millisecond_to_hour(millisecond: int):
return millisecond / 1000 / 3600
def _get_time_range(str_data: str):
tmp_list = str_data.split(",")
return (int(tmp_list[0]), int(tmp_list[1]))
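# Hedged usage sketch: annotation ranges arrive as "begin,end" millisecond strings,
# and worktime is reported in milliseconds (the values below are made up).
_example_range = _get_time_range("1500,4500")    # -> (1500, 4500)
_example_hours = _millisecond_to_hour(7200000)   # -> 2.0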
class ListOutOfRangeAnnotationForMovieMain:
def __init__(self, service: annofabapi.Resource):
self.service = service
@staticmethod
def get_max_seconds_for_webapi(annotation: Dict[str, Any]) -> Tuple[float, float]:
details = annotation["details"]
range_list = [_get_time_range(e["data"]) for e in details if e["data"] is not None]
if len(range_list) == 0:
return 0, 0
else:
max_begin = max([e[0] for e in range_list]) / 1000
max_end = max([e[1] for e in range_list]) / 1000
return max_begin, max_end
@staticmethod
def get_max_seconds_for_zip(annotation: Dict[str, Any]) -> Tuple[float, float]:
details = annotation["details"]
range_list = [(e["data"]["begin"], e["data"]["end"]) for e in details if e["data"]["_type"] == "Range"]
if len(range_list) == 0:
return 0, 0
else:
max_begin = max([e[0] for e in range_list]) / 1000
max_end = max([e[1] for e in range_list]) / 1000
return max_begin, max_end
def create_dataframe(
self,
project_id: str,
task_list: List[Dict[str, Any]],
input_data_list: List[Dict[str, Any]],
annotation_zip: Optional[Path],
) -> pandas.DataFrame:
if annotation_zip is None:
            logger.info(f"Fetching annotation information for {len(task_list)} tasks via the web API.")
for task_index, task in enumerate(task_list):
task["worktime_hour"] = _millisecond_to_hour(task["work_time_span"])
task["input_data_id"] = task["input_data_id_list"][0]
annotation, _ = self.service.api.get_editor_annotation(
project_id, task["task_id"], task["input_data_id"]
)
max_seconds = self.get_max_seconds_for_webapi(annotation)
task["max_begin_second"] = max_seconds[0]
task["max_end_second"] = max_seconds[1]
if (task_index + 1) % 100 == 0:
                    logger.info(f"Fetched annotation information for {task_index+1} tasks.")
else:
            logger.info(f"Fetching annotation information for {len(task_list)} tasks from {str(annotation_zip)}.")
with zipfile.ZipFile(str(annotation_zip), "r") as zip_file:
for task_index, task in enumerate(task_list):
task["worktime_hour"] = _millisecond_to_hour(task["work_time_span"])
task["input_data_id"] = task["input_data_id_list"][0]
parser = SimpleAnnotationZipParser(zip_file, f"{task['task_id']}/{task['input_data_id']}.json")
simple_annotation = parser.load_json()
max_seconds = self.get_max_seconds_for_zip(simple_annotation)
task["max_begin_second"] = max_seconds[0]
task["max_end_second"] = max_seconds[1]
if (task_index + 1) % 100 == 0:
                        logger.info(f"Fetched annotation information for {task_index+1} tasks.")
df_task = pandas.DataFrame(
task_list,
columns=[
"task_id",
"status",
"phase",
"worktime_hour",
"max_begin_second",
"max_end_second",
"input_data_id",
],
)
df_input_data = pandas.DataFrame(input_data_list, columns=["input_data_id", "input_duration"])
df_merged = | pandas.merge(df_task, df_input_data, how="left", on="input_data_id") | pandas.merge |
import unittest
import pandas as pd
import numpy as np
from khayyam import JalaliDate
from datetime import timedelta
from pandas_jalali.converter import get_gregorian_date_from_jalali_date, validate_jalali_date
class TestConverter(unittest.TestCase):
def setUp(self):
dt = JalaliDate(1346, 12, 30)
dt_jalali_y = []
dt_jalali_m = []
dt_jalali_d = []
dt_gregorian_y = []
dt_gregorian_m = []
dt_gregorian_d = []
for t in range(1, 10000):
dt += timedelta(days=1)
dt_jalali_y.append(dt.year)
dt_jalali_m.append(dt.month)
dt_jalali_d.append(dt.day)
gregorian = dt.todate()
dt_gregorian_y.append(gregorian.year)
dt_gregorian_m.append(gregorian.month)
dt_gregorian_d.append(gregorian.day)
self.dt_jalali_y = pd.Series(dt_jalali_y)
self.dt_jalali_m = pd.Series(dt_jalali_m)
self.dt_jalali_d = pd.Series(dt_jalali_d)
self.dt_gregorian_y = pd.Series(dt_gregorian_y)
self.dt_gregorian_m = pd.Series(dt_gregorian_m)
self.dt_gregorian_d = pd.Series(dt_gregorian_d)
def test_get_gregorian_date_from_jalali_date(self):
y, m, d = get_gregorian_date_from_jalali_date(
self.dt_jalali_y,
self.dt_jalali_m,
self.dt_jalali_d
)
self.assertTrue(y.equals(self.dt_gregorian_y.astype(float)))
self.assertTrue(m.equals(self.dt_gregorian_m.astype(float)))
self.assertTrue(d.equals(self.dt_gregorian_d.astype(float)))
def test_validate_jalali_date(self):
dt_jalali_y = pd.Series([4178, 1346, 1346, None, None, 1346])
dt_jalali_m = pd.Series([1, 1, 23, None, 1, 1])
dt_jalali_d = pd.Series([1, 34, 1, None, 1, 1])
y, m, d = validate_jalali_date(
dt_jalali_y,
dt_jalali_m,
dt_jalali_d
)
self.assertTrue(pd.Series(y).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1346])))
self.assertTrue(pd.Series(m).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1])))
self.assertTrue(pd.Series(d).equals(pd.Series([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 1])))
def test_invalid_date_convertation(self):
dt_jalali_y = | pd.Series([np.NaN, 1346]) | pandas.Series |
from datetime import datetime
from functools import partial
import numpy as np
from unittest.mock import Mock
from unittest.mock import call
from unittest.mock import patch
from dateutil.parser import parse
import pandas
import pytest
class TestFit:
@pytest.fixture
def fit(self):
from palladium.fit import fit
return fit
@pytest.fixture
def dataset_loader(self):
dataset_loader = Mock()
dataset_loader.return_value = Mock(), Mock()
return dataset_loader
def test_it(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
model_persister.write.assert_called_with(model)
model_persister.activate.assert_called_with(
model_persister.write.return_value)
def test_no_persist(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister,
persist=False)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
assert model_persister.call_count == 0
def test_evaluate_no_test_dataset(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
X, y = object(), object()
dataset_loader_train.return_value = X, y
result = fit(dataset_loader_train, model, model_persister,
evaluate=True)
assert result is model
dataset_loader_train.assert_called_with()
model.fit.assert_called_with(X, y)
assert model.score.call_count == 1
model.score.assert_called_with(X, y)
model_persister.write.assert_called_with(model)
def test_evaluate_with_test_dataset(self, fit):
model, dataset_loader_train, model_persister = Mock(), Mock(), Mock()
del model.cv_results_
dataset_loader_test = Mock()
X, y, X_test, y_test = object(), object(), object(), object()
dataset_loader_train.return_value = X, y
dataset_loader_test.return_value = X_test, y_test
result = fit(dataset_loader_train, model, model_persister,
dataset_loader_test=dataset_loader_test,
evaluate=True)
assert result is model
dataset_loader_train.assert_called_with()
dataset_loader_test.assert_called_with()
model.fit.assert_called_with(X, y)
assert model.score.call_count == 2
assert model.score.mock_calls[0] == call(X, y)
assert model.score.mock_calls[1] == call(X_test, y_test)
model_persister.write.assert_called_with(model)
def test_evaluate_annotations(self, fit, dataset_loader):
model = Mock()
del model.cv_results_
model.score.side_effect = [0.9, 0.8]
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=Mock(),
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result.__metadata__['score_train'] == 0.9
assert result.__metadata__['score_test'] == 0.8
def test_evaluate_scoring(self, fit, dataset_loader):
model = Mock()
del model.cv_results_
scorer = Mock()
scorer.side_effect = [0.99, 0.01]
fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=Mock(),
dataset_loader_test=dataset_loader,
scoring=scorer,
evaluate=True,
)
assert model.score.call_count == 0
assert scorer.call_count == 2
def test_evaluate_no_score(self, fit, dataset_loader):
model = Mock()
del model.score
del model.cv_results_
with pytest.raises(ValueError):
fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=Mock(),
dataset_loader_test=dataset_loader,
evaluate=True,
)
def test_persist_if_better_than(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
del model.cv_results_
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result is model
assert model_persister.write.call_count == 1
def test_persist_if_better_than_false(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
del model.cv_results_
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.91,
)
assert result is model
assert model_persister.write.call_count == 0
def test_persist_if_better_than_persist_false(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
del model.cv_results_
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
persist=False,
dataset_loader_test=dataset_loader,
persist_if_better_than=0.9,
)
assert result is model
assert model_persister.write.call_count == 0
def test_persist_if_better_than_no_dataset_test(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.score.return_value = 0.9
del model.cv_results_
with pytest.raises(ValueError):
fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
dataset_loader_test=None,
persist_if_better_than=0.9,
)
def test_activate_no_persist(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
del model.cv_results_
result = fit(
dataset_loader_train=dataset_loader,
model=model,
model_persister=model_persister,
persist=False,
)
assert result is model
assert model_persister.activate.call_count == 0
def test_timestamp(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
del model.cv_results_
def persist(model):
assert 'train_timestamp' in model.__metadata__
model_persister.write.side_effect = persist
before_fit = datetime.now()
result = fit(
dataset_loader,
model,
model_persister,
)
after_fit = datetime.now()
assert result is model
timestamp = parse(model.__metadata__['train_timestamp'])
assert before_fit < timestamp < after_fit
model_persister.write.assert_called_with(model)
def test_cv_results(self, fit, dataset_loader):
model, model_persister = Mock(), Mock()
model.cv_results_ = {
'mean_train_score': [3, 2, 1],
'mean_test_score': [1, 2, 3],
}
def persist(model):
assert 'cv_results' in model.__metadata__
model_persister.write.side_effect = persist
result = fit(
dataset_loader,
model,
model_persister,
)
assert result is model
cv_results = model.__metadata__['cv_results']
cv_results = pandas.read_json(cv_results)
# this file contains all components needed to collect, format and save the data from dwd
import os
import re
import requests
from zipfile import ZipFile
from io import TextIOWrapper, BytesIO
import csv
import pandas as pd
import numpy as np
import datetime
# constants
DWD_URL_HISTORICAL = "https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/historical/"
DWD_URL_RECENT = "https://opendata.dwd.de/climate_environment/CDC/observations_germany/climate/hourly/air_temperature/recent/"
DWD_FOLDER = os.path.join(os.path.dirname(__file__), "data", "dwd")
os.makedirs(DWD_FOLDER, exist_ok = True)
def get_unpacked_zips(*urls):
"""
this function is a generator which downloads and unzips all .zip files found at a URL
"""
for url in urls:
html = str(requests.get(url).content)
for zip_link in [f"{url}{link}" for link in re.findall(r'href="(\w*\.zip)"', html)]:
yield ZipFile(BytesIO(requests.get(zip_link).content))
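# Usage sketch (illustrative only; assumes network access to the DWD open-data
# server): iterate lazily over every station archive linked on the recent-data
# page and list the files each one contains.
#
#   for archive in get_unpacked_zips(DWD_URL_RECENT):
#       print(archive.namelist())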
def verify_dwd_urls(*urls):
"""
this function checks the given URLs to make sure they are from DWD (default URLs are historical & recent)
"""
if len(urls) == 0:
urls = [
DWD_URL_HISTORICAL,
DWD_URL_RECENT
]
for url in urls:
if not "https://opendata.dwd.de/" in url:
raise Exception(f"The url '{url}' is not supported, only urls from 'https://opendata.dwd.de/' are supported.")
return urls
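# Usage sketch (illustrative only): calling verify_dwd_urls() without arguments
# falls back to the historical & recent DWD URLs, while any URL outside
# 'https://opendata.dwd.de/' raises an Exception.
#
#   assert verify_dwd_urls() == [DWD_URL_HISTORICAL, DWD_URL_RECENT]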
def download_dwd(*urls):
"""
this function downloads data from DWD and saves it as a parquet file (default URLs are historical & recent)
"""
urls = verify_dwd_urls(*urls)
for unpacked in get_unpacked_zips(*urls):
data_files = [f for f in unpacked.namelist() if ".txt" in f]
meta_data_file = [f for f in data_files if "Metadaten_Geographie" in f][0]
main_data_file = [f for f in data_files if "produkt_tu_stunde" in f][0]
station_id = int(main_data_file.split("_")[-1].split(".")[0])
# reading main data
with unpacked.open(main_data_file, "r") as main_data:
station_df = pd.DataFrame(
csv.DictReader(TextIOWrapper(main_data, 'utf-8'), delimiter=';')
).drop(["STATIONS_ID", "QN_9", "eor"], axis="columns")
station_df.columns = ["TIME", "TEMPERATURE", "HUMIDITY"]
station_df.TIME = pd.to_datetime(station_df.TIME, format="%Y%m%d%H", utc=True)
# adding missing rows
station_df = pd.merge(
pd.DataFrame({
"TIME": pd.date_range(
station_df.TIME.min(),
station_df.TIME.max(),
freq = "1H",
tz = "utc"
)
}),
station_df,
how = "outer"
).fillna(-999)
# clean up
station_df.TEMPERATURE = pd.to_numeric(station_df.TEMPERATURE, downcast="float")
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
Total revenue is made up of reserve and energy revenues which are displayed in a stacked
bar plot with total generation cost. Net revenue is represented by a dot.
Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Pool_Revenues, Reserve_Revenues], axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" ($/KW-yr)")
fig1, ax = plt.subplots(figsize=(self.x,self.y))
net_rev = plt.plot(Net_Revenue.index, Net_Revenue.values, color='black', linestyle='None', marker='o')
Total_Systems_Cost_Out.plot.bar(stacked=True, edgecolor='black', linewidth='0.1', ax=ax)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('Total System Net Rev, Rev, & Cost ($/KW-yr)', color='black', rotation='vertical')
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
handles, labels = ax.get_legend_handles_labels()
ax.legend(reversed(handles), reversed(labels), loc='upper center',bbox_to_anchor=(0.5,-0.15),
facecolor='inherit', frameon=True, ncol=3)
#Legend 1
leg1 = ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
#Legend 2
ax.legend(net_rev, ['Net Revenue'], loc='center left',bbox_to_anchor=(1, 0.9),
facecolor='inherit', frameon=True)
# Manually add the first legend back
ax.add_artist(leg1)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
"""
Compare COVID-19 simulation outputs to data.
Estimate Rt using epyestim
"""
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
import matplotlib.dates as mdates
import epyestim
import epyestim.covid19 as covid19
import seaborn as sns
from estimate_Rt_trajectores import *
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-stem",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
parser.add_argument(
"--plot_only",
action='store_true',
help="If specified only Rt plots will be generated, given Rt was already estimated",
)
parser.add_argument(
"--use_pre_aggr",
action='store_true',
help="If specified uses pre-aggregated new_infections instead of new_infections per trajectory to estimate Rt",
)
return parser.parse_args()
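# Illustrative sketch (not part of the original pipeline): the estimation in
# run_Rt_estimation below reduces to a single epyestim call on a date-indexed
# series of daily new cases, e.g.
#
#   df_rt = covid19.r_covid(cases_series, smoothing_window=28, r_window_size=3)
#
# which returns a DataFrame whose quantile columns ('Q0.025', 'Q0.5', 'Q0.975')
# are renamed to rt_lower / rt_median / rt_upper further down.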
def run_Rt_estimation(exp_name,grp_numbers,smoothing_window, r_window_size):
"""
Rt estimation using median new_infections, aggregated from the trajectoriesDat.csv
Code following online example:
https://github.com/lo-hfk/epyestim/blob/main/notebooks/covid_tutorial.ipynb
smoothing_window of 28 days was found to be most comparable to EpiEstim in this case
r_window_size default is 3 if not specified; increasing r_window_size narrows the uncertainty bounds
"""
simdate = exp_name.split("_")[0]
df = pd.read_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'))
df['date'] = pd.to_datetime(df['date'])
df = df[(df['date'] > pd.Timestamp('2020-03-01'))]
df_rt_all = pd.DataFrame()
for ems_nr in grp_numbers:
if ems_nr == 0:
region_suffix = "illinois"
else:
region_suffix = f'covidregion_{str(ems_nr)}'
if region_suffix not in df["geography_modeled"].unique():
continue
mdf = df[df['geography_modeled'] == region_suffix]
mdf = mdf.set_index('date')['cases_new_median']
"""Use default distributions (for covid-19)"""
si_distrb, delay_distrb = get_distributions(show_plot=False)
df_rt = covid19.r_covid(mdf[:-1], smoothing_window=smoothing_window, r_window_size=r_window_size)
df_rt['geography_modeled'] = region_suffix
df_rt.reset_index(inplace=True)
df_rt = df_rt.rename(columns={'index': 'date',
'Q0.5': 'rt_median',
'Q0.025': 'rt_lower',
'Q0.975': 'rt_upper'})
df_rt['model_date'] = pd.Timestamp(simdate)
df_rt = df_rt[['model_date', 'date', 'geography_modeled', 'rt_median', 'rt_lower', 'rt_upper']]
# df_rt['smoothing_window'] =smoothing_window
# df_rt['r_window_size'] = r_window_size
df_rt_all = df_rt_all.append(df_rt)
df_rt_all['rt_pre_aggr'] = use_pre_aggr
df_rt_all.to_csv(os.path.join(exp_dir, 'rtNU.csv'), index=False)
if not 'rt_median' in df.columns:
df['date'] = pd.to_datetime(df['date'])
df_rt_all['date'] = pd.to_datetime(df_rt_all['date'])
from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.shell.shell import Shell
from cloudmesh.common.console import Console
from cloudmesh.compute.vm.Provider import Provider
import pandas as pd
import numpy as np
from cloudmesh.common.Printer import Printer
from cloudmesh.frugal.api import aws_frugal, gcp_frugal, azure_frugal
from datetime import datetime
from cloudmesh.common.variables import Variables
from cloudmesh.vm.command.vm import VmCommand
from cloudmesh.mongo.CmDatabase import CmDatabase
from os import path
class FrugalCommand(PluginCommand):
# noinspection PyUnusedLocal
@command
def do_frugal(self, args, arguments):
"""
::
Usage:
frugal list [--benchmark] [--refresh] [--order=ORDER] [--size=SIZE] [--cloud=CLOUD]
frugal boot [--refresh] [--order=ORDER] [--cloud=CLOUD]
frugal benchmark
Arguments:
ORDER sorting hierarchy, either price, cores, or
memory
SIZE number of results to be printed to the
console. Default is 25, can be changed with
cms set frugal.size = SIZE
CLOUD Limits the frugal method to a specific cloud
instead of all supported providers
Options:
--refresh forces a refresh on all entries for
all supported providers
--order=ORDER sets the sorting on the results list
--size=SIZE sets the number of results returned
to the console
--benchmark prints the benchmark results instead
of flavors
Description:
frugal list
lists the cheapest flavors for aws, azure, and gcp
in a sorted table by default; if --benchmark is
used, it lists the benchmark results stored in
the db
frugal boot
boots the cheapest bootable vm from the frugal
list.
frugal benchmark
executes a benchmarking command on the newest
available vm on the current cloud
Examples:
cms frugal list --refresh --order=price --size=150
cms frugal list --benchmark
cms frugal boot --order=memory
cms frugal benchmark
...and so on
Tips:
frugal benchmark will stall the command line after
the user enters their ssh key. This means the benchmark
is running
frugal benchmark is dependent on the vm put command.
this may need to be manually added to the vm command
file.
Limitations:
frugal boot and benchmark only work on implemented providers
"""
arguments.REFRESH = arguments['--refresh'] or None
arguments.SIZE = arguments['--size'] or None
arguments.ORDER = arguments['--order'] or None
arguments.BENCHMARK = arguments['--benchmark'] or None
arguments.CLOUD = arguments['--cloud'] or None
var_list = Variables(filename="~/.cloudmesh/var-data")
var_size = var_list['frugal.size']
if arguments.ORDER is None:
arguments.ORDER='price'
if arguments.REFRESH is None:
arguments.REFRESH=False
else:
arguments.REFRESH=True
if arguments.BENCHMARK is None:
arguments.BENCHMARK=False
else:
arguments.BENCHMARK=True
if arguments.SIZE is None:
arguments.SIZE=var_size
if arguments.list:
self.list(order = arguments.ORDER,refresh=bool(arguments.REFRESH), resultssize= int(arguments.SIZE), benchmark=arguments.BENCHMARK, cloud=arguments.CLOUD)
elif arguments.boot:
self.boot(order = arguments.ORDER,refresh=bool(arguments.REFRESH), cloud=arguments.CLOUD)
elif arguments.benchmark:
self.benchmark()
else:
return ""
return ""
def list(self,order='price', resultssize=25, refresh=False, printit = True, benchmark=False, cloud=None):
clouds = ['aws', 'azure', 'gcp']
if cloud in clouds:
clouds = [cloud]
if benchmark:
# get benchmarks
cm = CmDatabase()
benchmarks = []
for cloud in clouds:
print("searching " + cloud)
benchmarktemp = list(cm.collection(cloud + '-frugal-benchmark').find())
benchmarks = benchmarks+benchmarktemp
print(Printer.write(benchmarks, order=['cloud', 'name', 'region', 'ImageId', 'flavor', 'updated', 'BenchmarkTime']))
return
else:
#check to make sure that order is either price, cores, or memory
if order not in ['price', 'cores', 'memory']:
Console.error(f'order argument must be price, cores, or memory')
return
printlist=[]
if 'aws' in clouds:
# get aws pricing info
printlist = printlist + list(aws_frugal.get_aws_pricing(refresh=refresh).find())
if 'gcp' in clouds:
# get gcp pricing info
printlist = printlist + list(gcp_frugal.get_google_pricing(refresh=refresh).find())
if 'azure' in clouds:
# get azure pricing info
printlist = printlist + list(azure_frugal.get_azure_pricing(refresh=refresh).find())
# turn numpy array into a pandas dataframe, assign column names, and remove na values
flavor_frame = pd.DataFrame(printlist)
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial.polynomial import polyfit
from scipy.stats import shapiro
from scipy.stats import ttest_ind as tt
from scipy.stats import spearmanr as corrp
import numpy as np
from statsmodels.graphics.gofplots import qqplot
font = {'family' : 'sans-serif',
'weight' : 'light',
'size' : 16}
matplotlib.rc('font', **font)
bad_indices=[]
sr_data=pd.read_csv('self_report_study2.csv') #load self-report data
mb_agnostic=pd.read_csv('mb_scores_rares_empirical_best.csv')
mb_scores=mb_agnostic['MB_behav']
state_t1=pd.read_csv('Gillan_TL_full_lrT.csv',header=None) #load state transition lrs
state_t=pd.read_csv('Gillan_Or_full_lrT_decay.csv',header=None) #load state transition lrs
print(len(state_t1))
r,p=corrp(state_t1[0],state_t[0])
print('CORREL ST TL both models : {}, p {}'.format(r,p))
it_mb=pd.read_csv('Gillan_Or_full_MB_decay.csv',header=None) #load MB beta
# it_mb=np.log(it_mb)
mf1=pd.read_csv('Gillan_Or_full_MF1_decay.csv',header=None)
mf2=pd.read_csv('Gillan_Or_full_mf2_decay.csv',header=None)
lr1=pd.read_csv('Gillan_Or_full_lr1_decay.csv',header=None)
# lr2=pd.read_csv('Gillan_O_full_lr2_decay.csv',header=None)
st=pd.read_csv('Gillan_Or_full_st_decay.csv',header=None)
mf1a=pd.read_csv('Gillan_Or_full_mf1a_decay.csv',header=None)
mf=mf1a+mf1
# mf1=pd.read_csv('mf1.csv',header=None)
# lr1=pd.read_csv('lr1.csv',header=None)
# lr2=pd.read_csv('lr2.csv',header=None)
# temp2=pd.read_csv('temp2nd.csv',header=None)
# stick=pd.read_csv('st.csv',header=None)
print('state transition LR mean: {}, sd: {}'.format(np.mean(state_t[0]),np.std(state_t[0])))
f2_low_lr=[]
f2_low_mb=[]
f2_high_lr=[]
f2_high_mb=[]
f2_low_mba=[]
f2_high_mba=[]
sr_data_r=sr_data
fac1=sr_data_r['Factor1']
fac2=sr_data_r['Factor2']
fac3=sr_data_r['Factor3']
print('mean MB agnostic score: {}, sd: {}'.format(np.mean(mb_scores),np.std(mb_scores)))
bad_rows=[]
low_mb=[]
# it_mb=np.log(it_mb)
# mf=np.log(mf)
# state_t=np.log(state_t)
# fac2=fac2+2.65
# fac2r=np.log(fac2+2.65)
# print('MINIMUM VALUE COMPULSIVITY : {}'.format(np.min(fac2r)))
high_lr_mbbeta=[]
high_lr_mbscores=[]
for i in range(len(fac2)):
# if (it_mb[0][i]>15):
# bad_rows.append(i)
if (it_mb[0][i]<3):
bad_rows.append(i)
if (state_t[0][i])>0.35:
high_lr_mbbeta.append(it_mb[0][i])
bad_rows.append(i)
high_lr_mbscores.append(mb_scores[i])
# if mb_scores[i]<0.05:
# bad_rows.append(i)
print('\ndistribution MB Betas for high TLs\n')
ax = sns.distplot(high_lr_mbbeta)
plt.show()
sns.scatterplot(x=high_lr_mbbeta,y=high_lr_mbscores)
plt.ylabel('mb scores (high TL)')
plt.xlabel('mb beta (high TL)')
plt.show()
# print(np.mode(it_mb[0]))
state_t = state_t.drop(labels=bad_rows)
state_t = state_t.reset_index(drop=True)
it_mb = it_mb.drop(labels=bad_rows)
it_mb = it_mb.reset_index(drop=True)
mf1 = mf1.drop(labels=bad_rows)
mf1 = mf1.reset_index(drop=True)
mf2 = mf2.drop(labels=bad_rows)
mf2 = mf2.reset_index(drop=True)
mf = mf.drop(labels=bad_rows)
mf = mf.reset_index(drop=True)
lr1 = lr1.drop(labels=bad_rows)
lr1 = lr1.reset_index(drop=True)
st = st.drop(labels=bad_rows)
st = st.reset_index(drop=True)
mf1a = mf1a.drop(labels=bad_rows)
mf1a = mf1a.reset_index(drop=True)
mb_scores = mb_scores.drop(labels=bad_rows)
mb_scores = mb_scores.reset_index(drop=True)
import scipy
# print(scipy.stats.beta.fit(state_t[0],floc=0,fscale=1))
# print(scipy.stats.gamma.fit(it_mb[0],floc=0,fscale=1))
fac2r=fac2.drop(labels=bad_rows)
fac2r = fac2r.reset_index(drop=True)
# fac2r=np.log(fac2r+2.65)
above_2_high=[]
above_2_low=[]
for i in range(len(fac2r)):
if fac2r[i]>=1.0:
f2_high_lr.append(state_t[0][i])
f2_high_mb.append(it_mb[0][i])
f2_high_mba.append(mb_scores[i])
if state_t[0][i]>0.094:
above_2_high.append(state_t[0][i])
elif fac2r[i]<=-1:
if state_t[0][i]>0.094:
above_2_low.append(state_t[0][i])
f2_low_lr.append(state_t[0][i])
f2_low_mb.append(it_mb[0][i])
f2_low_mba.append(mb_scores[i])
# it_mb=np.log(it_mb)
# state_t=np.log(state_t)
# lr1=np.log(lr1)
# state_t=state_t+1
print('low MB performers compulsivity scores: {}'.format(low_mb))
print('mean low comp: {}, mean high comp state LR: {}'.format(np.median(f2_low_lr),np.median(f2_high_lr)))
print('mean low comp: {}, mean high comp MB-beta: {}'.format(np.mean(f2_low_mb),np.mean(f2_high_mb)))
print('')
print('percentage high comp above mean TL : {}'.format(len(above_2_high)/len(f2_high_lr)))
print('percentage low above mean TL : {}'.format(len(above_2_low)/len(f2_low_lr)))
print('')
# fac2r=np.log(fac2r+2.65)
print('mean: {}, sd: {} of TL full sample'.format(np.mean(state_t[0]),np.std(state_t[0])))
print('')
print('mean: {}, sd: {} of compulsivity full sample'.format(np.mean(fac2),np.std(fac2)))
print('mean: {}, sd: {} of compulsivity reduced sample'.format(np.mean(fac2r),np.std(fac2r)))
t,p=tt(fac2,fac2r,equal_var=False)
print('difference in compulsivity before after t: {}, p:{}'.format(t,p))
print('here')
print(mf[0][1])
print('here')
print(len(it_mb[0]))
print('here')
ratio_mfmb=[(it_mb[0][i]-mf[0][i])/ (mf[0][i]+it_mb[0][i]) for i in range(len(it_mb[0]))]
print('B-MF median: {}'.format(np.median(mf1)))
print('B-MF mean: {}'.format(np.mean(mf1[0])))
fac1=fac1.drop(labels=bad_rows)
fac3=fac3.drop(labels=bad_rows)
sr_data= sr_data.drop(labels=bad_rows)
sr_data =sr_data.reset_index(drop=True)
# print('mean: {}, sd: {} of compulsivity small sample'.format(np.mean(fac2_r),np.std(fac2_r)))
r,p=corrp(mb_scores,state_t[0])
print('model agnostic scores and state_t: {}, pval: {}'.format(r,p))
r,p=corrp(mb_scores,it_mb[0])
print('model agnostic scores and MB beta: {}, pval: {}'.format(r,p))
r,p=corrp(mb_scores,lr1[0])
print('model agnostic scores and decay rate: {}, pval: {}'.format(r,p))
r,p=corrp(state_t[0],lr1[0])
print('state TL and decay rate: {}, pval: {}'.format(r,p))
r,p=corrp(it_mb[0],state_t[0])
print('MB beta and state_t: {}, pval: {}'.format(r,p))
TLMB=state_t*it_mb
r,p=corrp(state_t[0],TLMB[0])
print('TLMB and STATE TL: {}, pval: {}'.format(r,p))
lrt_r=state_t
corr_fac1=sr_data_r['Factor1'].corr(np.log(lrt_r[0]))
print(corr_fac1)
iq=sr_data_r['iq']
corr_fac2=sr_data_r['Factor2'].corr(np.log(lrt_r[0]))
print(corr_fac2)
corr_fac3=sr_data_r['Factor3'].corr(np.log(lrt_r[0]))
print(corr_fac3)
sns.scatterplot(x=fac2r,y=state_t[0])
plt.ylabel('State Transition Learning ')
plt.xlabel('Compulsivity')
plt.show()
sns.scatterplot(x=fac2r,y=mf[0])
plt.ylabel('MF ')
plt.xlabel('Compulsivity')
plt.show()
sns.scatterplot(x=fac2r,y=lr1[0])
plt.ylabel('LR DECAY ')
plt.xlabel('Compulsivity')
plt.show()
sns.scatterplot(x=fac2r,y=ratio_mfmb)
plt.ylabel('ratio mfmb ')
plt.xlabel('Compulsivity')
plt.show()
sns.scatterplot(x=state_t[0],y=it_mb[0])
plt.ylabel('MB Beta ')
plt.xlabel('State T')
plt.show()
sns.scatterplot(x=fac2r,y=it_mb[0])
plt.ylabel('(log) MB Beta ')
plt.xlabel('Compulsivity')
plt.show()
sns.scatterplot(x=fac2r,y=mb_scores)
plt.ylabel('MB ma')
plt.xlabel('Compulsivity')
plt.show()
from mpl_toolkits.mplot3d import Axes3D
sns.set(style = "darkgrid")
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
df = pd.DataFrame()
import sys
import operator
import re, string
import csv
import math
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_regression
from kmodes.kmodes import KModes
from kmodes.kprototypes import KPrototypes
from sklearn.decomposition import PCA
import pandas as pd
import sqlite3
import matplotlib.pylab as plt
# Paper on k-modes and k-prototypes
# http://www.cs.ust.hk/~qyang/Teaching/537/Papers/huang98extensions.pdf
# https://pdfs.semanticscholar.org/1955/c6801bca5e95a44e70ce14180f00fd3e55b8.pdf Cao method
# Histogram bins for 3 clusters with outliers [ 34900. , 274933.33333333 , 514966.66666667, 755000.]
# Histogram bins for 2 clusters with outliers [ 34900. 154900. 274900.]
# Columns with 50% or more nulls:
# PoolQC 0.995205
# MiscFeature 0.963014
# Alley 0.937671
# Fence 0.807534
numRows = 1459
numCols = 79
histoBins = []
allFeatures = ['MSSubClass','MSZoning','LotFrontage','LotArea','Street','Alley','LotShape','LandContour','Utilities',
'LotConfig','LandSlope','Neighborhood','Condition1','Condition2','BldgType','HouseStyle','OverallQual','OverallCond',
'YearBuilt','YearRemodAdd','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType','MasVnrArea','ExterQual',
'ExterCond','Foundation','BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinSF1','BsmtFinType2','BsmtFinSF2',
'BsmtUnfSF','TotalBsmtSF','Heating','HeatingQC','CentralAir','Electrical','1stFlrSF','2ndFlrSF','LowQualFinSF',
'GrLivArea','BsmtFullBath','BsmtHalfBath','FullBath','HalfBath','BedroomAbvGr','KitchenAbvGr','KitchenQual','TotRmsAbvGrd',
'Functional','Fireplaces','FireplaceQu','GarageType','GarageYrBlt','GarageFinish','GarageCars','GarageArea','GarageQual',
'GarageCond','PavedDrive','WoodDeckSF','OpenPorchSF','EnclosedPorch','3SsnPorch','ScreenPorch','PoolArea','PoolQC','Fence',
'MiscFeature','MiscVal','MoSold','YrSold','SaleType','SaleCondition']
numericalFeatures = ['LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd',
'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF',
'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'TotRmsAbvGrd', 'Fireplaces', 'GarageYrBlt',
'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea',
'MiscVal', 'MoSold', 'YrSold']
categoricalFeatures = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig',
'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st',
'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'BedroomAbvGr', 'KitchenAbvGr', 'KitchenQual', 'Functional',
'FireplaceQu', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive', 'PoolQC', 'Fence', 'MiscFeature',
'SaleType', 'SaleCondition']
# Check which columns of df have a percentage of null values (nan) higher than p
def checkNulls(df,p) :
check_null = df.isnull().sum(axis=0).sort_values(ascending=False)/float(len(df))
print(check_null[check_null>p])
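# Usage sketch (illustrative): checkNulls(df, 0.5) prints every column whose
# fraction of missing values exceeds 50%, e.g. PoolQC, MiscFeature, Alley and
# Fence for this dataset (see the summary in the header comments above).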
# Fill nans with 0s for numerical columns and with 'NA' for categorical ones
# also, drop the Id column
def processData(df) :
# Keep only elements in the first bin :
df.drop(df[df.SalePrice > 274934].index, inplace=True)
classLabels = df['SalePrice']
df.drop(['SalePrice'],axis=1,inplace=True)
df.drop(['Id'],axis=1,inplace=True)
df[['BsmtFinSF1']] = df[['BsmtFinSF1']].apply(pd.to_numeric)
# Drop the columns with 50% or more nulls :
# df.drop(['PoolQC'],axis=1,inplace=True)
# df.drop(['MiscFeature'],axis=1,inplace=True)
# df.drop(['Alley'],axis=1,inplace=True)
# df.drop(['Fence'],axis=1,inplace=True)
# All numerical features with 0
df['LotFrontage'].fillna(0,inplace=True)
df['MasVnrArea'].fillna(0,inplace=True)
df['GarageYrBlt'].fillna(0,inplace=True)
df['BsmtFinSF1'].fillna(0,inplace=True)
df['BsmtFinSF2'].fillna(0,inplace=True)
df['BsmtUnfSF'].fillna(0,inplace=True)
df['TotalBsmtSF'].fillna(0,inplace=True)
df['BsmtFullBath'].fillna(0,inplace=True)
df['BsmtHalfBath'].fillna(0,inplace=True)
df['GarageCars'].fillna(0,inplace=True)
df['GarageArea'].fillna(0,inplace=True)
# All categorical ones with NA
df = df.fillna('NA')
return df,classLabels
def normalizeData(df) :
cols_to_norm = numericalFeatures
df[cols_to_norm] = df[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max() - x.min()))
# Discretize the prices into binNum categories
def processPrices(y,binNum) :
histo = np.histogram(y,binNum)
bins = histo[1]
print("------------------ Real histogram division ------------------")
print(histo[0])
print(bins)
newPrices = []
for i in range(len(y)) :
for j in range(len(bins)-1) :
if ((y[i] >= bins[j]) and (y[i] <= bins[j+1])) :
newPrices.append(j)
break
histoBins = bins
return newPrices
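# Illustrative example (assumed values): with binNum=3 and prices spanning
# 34900..274900, np.histogram yields edges of roughly [34900, 114900, 194900,
# 274900], so a sale price of 120000 is mapped to category 1 (its bin index).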
def dropCols(df,cols) :
for col in cols :
df.drop([col],axis=1,inplace=True)
# Get columns that are categorical
def getCatVars(data) :
catVars = []
for i in range(len(allFeatures)) :
if allFeatures[i] in categoricalFeatures :
catVars.append(i)
return catVars
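# Illustrative sketch (assumed usage, not shown in this excerpt): the indices
# returned by getCatVars are what KPrototypes needs in order to treat those
# columns as categorical when clustering the mixed-type data, e.g.
#
#   kp = KPrototypes(n_clusters=5, init='Cao')
#   clusters = kp.fit_predict(df.values, categorical=getCatVars(df))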
# Print a table showing the number of each class of elements existing in each cluster
def printTable(model,y) :
print("------------------ Price category Vs cluster placement table ------------------")
classtable = np.zeros((5, 5), dtype=int)
for ii, _ in enumerate(y):
classtable[int(y[ii]) , model.labels_[ii]] += 1
print("\n")
print(" | Cl. 1 | Cl. 2 | Cl. 3 | Cl. 4 |Cl. 5 |")
print("----|-------|-------|-------|-------|-------|")
for ii in range(5):
prargs = tuple([ii + 1] + list(classtable[ii, :]))
print(" P{0} | {1:>2} | {2:>2} | {3:>2} | {4:>2} | {5:>2} |".format(*prargs))
def printCentroidInfo(centroids,featuresNum,featuresCat) :
print("------------------ Centroid Information ------------------")
# Produces centroid information for both numerical and
# categorical variables
centroidsNum = centroids[0]
centroidsCat = centroids[1]
# Obtain the categorical features with different values in at least one cluster
diffFeatures = []
for i in range(len(featuresCat)) :
equal = True
j = 0
while (j < len(centroidsCat)-1 and equal) :
if (centroidsCat[j,i] != centroidsCat[j+1,i]) :
diffFeatures.append(i)
break
j += 1
print("Categorical features info : ")
# Print all features that affect the clusters and the values associated with them
for f in diffFeatures :
print("Feature : "+(featuresCat[f]))
for j in range(len(centroidsCat)) :
print("Centroid "+str(j+1)+" : "+centroidsCat[j,f])
print("Features not shown here have the same value for every cluster")
# Obtain the numerical features with different values in at least one cluster
diffFeatures = []
for i in range(len(featuresNum)) :
equal = True
j = 0
while (j < len(centroidsNum)-1 and equal) :
if (centroidsNum[j,i] != centroidsNum[j+1,i]) :
diffFeatures.append(i)
break
j += 1
print("Numerical features info : ")
# Print all features that affect the clusters and the values associated with them
for f in diffFeatures :
print("Feature : "+(featuresNum[f]))
for j in range(len(centroidsNum)) :
print("Centroid "+str(j+1)+" : "+ str(centroidsNum[j,f]))
print("Features not shown here have the same value for every cluster")
# Main program
if __name__ == '__main__':
# Read data
fName = sys.argv[1]
numClusters = int(sys.argv[2])
# Using test set
if (len(sys.argv) > 4) :
fName2 = sys.argv[3]
fpreds = sys.argv[4]
dffeatures = pd.read_csv(fName2)
dfpreds = pd.read_csv(fpreds)
dfpreds[['Id','SalePrice']] = dfpreds[['Id','SalePrice']].apply(pd.to_numeric)
dfpreds.drop(['Id'],axis=1,inplace=True)
df2 = pd.concat([dffeatures, dfpreds], axis=1)
df1 = pd.read_csv(fName)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Callable, Dict, Mapping, Optional, Set, cast, Iterable
import pandas as pd
from ax.core import Trial
from ax.core.base_trial import BaseTrial
from ax.core.data import Data
from ax.core.metric import Metric
from ax.core.runner import Runner as ax_Runner
from ax.service.scheduler import TrialStatus
from pyre_extensions import none_throws
from torchx.runner import Runner, get_runner
from torchx.runtime.tracking import FsspecResultTracker
from torchx.specs import AppDef, AppState, AppStatus, CfgVal
_TORCHX_APP_HANDLE: str = "torchx_app_handle"
_TORCHX_RUNNER: str = "torchx_runner"
_TORCHX_TRACKER_BASE: str = "torchx_tracker_base"
# maps torchx AppState to Ax's TrialStatus equivalent
APP_STATE_TO_TRIAL_STATUS: Dict[AppState, TrialStatus] = {
AppState.UNSUBMITTED: TrialStatus.CANDIDATE,
AppState.SUBMITTED: TrialStatus.STAGED,
AppState.PENDING: TrialStatus.STAGED,
AppState.RUNNING: TrialStatus.RUNNING,
AppState.SUCCEEDED: TrialStatus.COMPLETED,
AppState.CANCELLED: TrialStatus.ABANDONED,
AppState.FAILED: TrialStatus.FAILED,
AppState.UNKNOWN: TrialStatus.FAILED,
}
class AppMetric(Metric):
"""
Fetches AppMetric (the observation returned by the trial job/app)
via the ``torchx.tracking`` module. Assumes that the app used
the tracker in the following manner:
.. code-block:: python
tracker = torchx.runtime.tracking.FsspecResultTracker(tracker_base)
tracker[str(trial_index)] = {metric_name: value}
# -- or --
tracker[str(trial_index)] = {"metric_name/mean": mean_value,
"metric_name/sem": sem_value}
"""
def fetch_trial_data(self, trial: BaseTrial, **kwargs: Any) -> Data:
tracker_base = trial.run_metadata[_TORCHX_TRACKER_BASE]
tracker = FsspecResultTracker(tracker_base)
res = tracker[trial.index]
if self.name in res:
mean = res[self.name]
sem = None
else:
mean = res.get(f"{self.name}/mean")
sem = res.get(f"{self.name}/sem")
if mean is None and sem is None:
raise KeyError(
f"Observation for `{self.name}` not found in tracker at base `{tracker_base}`."
f" Ensure that the trial job is writing the results at the same tracker base."
)
df_dict = {
"arm_name": none_throws(cast(Trial, trial).arm).name,
"trial_index": trial.index,
"metric_name": self.name,
"mean": mean,
"sem": sem,
}
return Data(df=pd.DataFrame.from_records([df_dict]))
import os
import click
import subprocess
import os.path as op
import numpy as np
import nibabel as nib
import pandas as pd
from tqdm import tqdm
from glob import glob
from scipy.interpolate import interp1d
from scipy.signal import savgol_filter
from nilearn import plotting, signal, masking
from nilearn.datasets import fetch_surf_fsaverage
from nilearn.glm.first_level import run_glm
from sklearn.linear_model import LinearRegression
from nilearn.glm.first_level.experimental_paradigm import check_events
from nilearn.glm.first_level.design_matrix import make_first_level_design_matrix
from nilearn.glm.first_level.hemodynamic_models import _sample_condition, _resample_regressor
from nilearn.glm.first_level.design_matrix import _cosine_drift as dct_set
from .constants import HRFS_HR
def load_gifti(f, return_tr=True):
""" Load gifti array. """
f_gif = nib.load(f)
data = np.vstack([arr.data for arr in f_gif.darrays])
tr = float(f_gif.darrays[0].get_metadata()['TimeStep'])
if return_tr:
return data, tr
else:
return data
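# Usage sketch (hypothetical file name): load a GIFTI time series together with
# its repetition time.
#
#   func_data, tr = load_gifti('sub-01_task-face_hemi-L_bold.func.gii')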
def argmax_regularized(data, axis=0, percent=5):
""" Argmax but "regularized" by not taking the actual argmax,
but one relative to `percent` deviation from the max, like
what is done in the original GLMdenoise paper.
Parameters
----------
data : numpy array
A 1D, 2D, or 3D numpy array
axis : int
Axis to take the argmax over (e.g., the one representing n_comps)
percent : int/float
How much the "optimal" index may deviate from the max
Returns
-------
The "regularized argmax'ed" array
"""
# Compute maximum score across axis
maxx = data.max(axis=axis)
# Define cutoff as `percent` from maximum (Kay method)
cutoff = maxx * (1 - percent / 100.)
# Some vectorization magic
if data.ndim == 3:
cutoff = cutoff[:, np.newaxis, :]
# Find first index (argmax) that satisfies criterion
return (data >= cutoff).argmax(axis=axis)
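# Illustrative example (assumed shapes): with scores of shape (n_comps, n_voxels)
# and percent=5, the call below returns, per voxel, the *first* component count
# whose score lies within 5% of that voxel's maximum, rather than the strict argmax.
#
#   best_n_comps = argmax_regularized(r2_scores, axis=0, percent=5)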
def save_data(data, cfg, ddict, par_dir, desc, dtype, run=None, ext=None,
skip_if_single_run=False, nii=False):
""" Saves data as either numpy files (for fs* space data) or
gzipped nifti (.nii.gz; for volumetric data).
Parameters
----------
data : np.ndarray
Either a 1D (voxels,) or 2D (observations x voxels) array
cfg : dict
Config dictionary
par_dir : str
Name of parent directory ('preproc', 'denoising', 'best')
desc : str
Description string (desc-{desc})
dtype : str
Type of data (_{dtype}.{npy,nii.gz})
run : int/None
Run index (if None, assumed to be a single run)
ext : str
Extension (to determine how to save the data). If None, assuming
fMRI data.
nii : bool
Whether to force saving as nifti (if False, saves as npy)
"""
if data is None:
return None
if skip_if_single_run:
if len(ddict['funcs']) == 1:
return None
save_dir = op.join(cfg['save_dir'], par_dir)
if not op.isdir(save_dir):
os.makedirs(save_dir, exist_ok=True)
sub, ses, task, space, hemi = cfg['c_sub'], cfg['c_ses'], cfg['c_task'], cfg['space'], cfg['hemi']
space_idf = f'{space}_hemi-{hemi}' if 'fs' in space else space
if ses is None: # no separate session output dir
f_base = f"sub-{sub}_task-{task}"
else:
f_base = f"sub-{sub}_ses-{ses}_task-{task}"
if run is None:
f_out = op.join(save_dir, f_base + f'_space-{space_idf}_desc-{desc}_{dtype}')
else:
f_out = op.join(save_dir, f_base + f'_run-{run}_space-{space_idf}_desc-{desc}_{dtype}')
if ext == 'tsv':
data.to_csv(f_out + '.tsv', sep='\t', index=False)
return None
if 'fs' in cfg['space']: # surface, always save as npy
if cfg['save_mgz']:
if data.ndim == 1:
data = data.reshape((data.shape[0], 1, 1))
elif data.ndim == 2:
T, K = data.shape
data = data.reshape((K, 1, 1, T))
else:
raise ValueError("Trying to save data with >2 dimensions as MGZ file ...")
nib.MGHImage(data, np.eye(4)).to_filename(f_out + '.mgz')
else:
np.save(f_out + '.npy', data)
else: # volume, depends on `nii` arg
if nii: # save as volume
if not isinstance(data, nib.Nifti1Image):
data = masking.unmask(data, ddict['mask'])
data.to_filename(f_out + '.nii.gz')
else: # save as npy (faster/less disk space)
if isinstance(data, nib.Nifti1Image):
data = masking.apply_mask(data, ddict['mask'])
np.save(f_out + '.npy', data)
def hp_filter(data, tr, ddict, cfg, standardize=True):
""" High-pass filter (DCT or Savitsky-Golay). """
n_vol = data.shape[0]
st_ref = cfg['slice_time_ref'] # offset frametimes by st_ref * tr
ft = np.linspace(st_ref * tr, (n_vol + st_ref) * tr, n_vol, endpoint=False)
# Create high-pass filter and clean
if cfg['high_pass_type'] == 'dct':
hp_set = dct_set(cfg['high_pass'], ft)
data = signal.clean(data, detrend=False,
standardize=standardize, confounds=hp_set)
else: # savgol, hardcode polyorder (maybe make argument?)
window = int(np.round((1 / cfg['high_pass']) / tr))
data -= savgol_filter(data, window_length=window, polyorder=2, axis=0)
if standardize:
data = signal.clean(data, detrend=False, standardize=standardize)
return data
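# Usage sketch (assumed inputs): high-pass filter an (n_vol x n_voxels) array
# with the cutoff taken from cfg['high_pass'] and return the standardized result.
#
#   func_clean = hp_filter(func_data, tr=0.7, ddict=ddict, cfg=cfg)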
def get_frame_times(tr, ddict, cfg, Y):
""" Computes frame times for a particular time series (and TR). """
n_vol = Y.shape[0]
st_ref = cfg['slice_time_ref']
return np.linspace(st_ref * tr, (n_vol + st_ref) * tr, n_vol, endpoint=False)
def get_param_from_glm(name, labels, results, dm, time_series=False, predictors=False):
""" Get parameters from a fitted Nilearn GLM. """
if predictors and time_series:
raise ValueError("Cannot get predictors *and* time series.")
# Preallocate
if time_series:
data = np.zeros((dm.shape[0], labels.size))
elif predictors:
data = np.zeros((dm.shape[1], labels.size))
else:
data = np.zeros_like(labels)
# Extract data
for lab in np.unique(labels):
data[..., labels == lab] = getattr(results[lab], name)
return data
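# Usage sketch (illustrative): after labels, results = run_glm(Y, X), the per-label
# parameter estimates can be collected into one array per predictor, e.g.
#
#   betas = get_param_from_glm('theta', labels, results, dm, predictors=True)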
def create_design_matrix(tr, frame_times, events, hrf_model='kay', hrf_idx=None):
""" Creates a design matrix based on a HRF from <NAME>'s set
or a default one from Nilearn. """
# This is to keep oversampling consistent across hrf_models
hrf_oversampling = 10
design_oversampling = tr / (0.1 / hrf_oversampling)
if hrf_model != 'kay': # just use Nilearn!
return make_first_level_design_matrix(
frame_times, events, drift_model=None, min_onset=0,
oversampling=design_oversampling, hrf_model=hrf_model
)
if hrf_model == 'kay':
if hrf_idx is None: # 20 different DMs (based on different HRFs)
to_iter = range(HRFS_HR.shape[1])
else: # use the supplied HRF idx (e.g., 5)
to_iter = [hrf_idx]
dms = [] # will store all design matrices
for hrf_idx in to_iter: # iterate across all HRFs
hrf = HRFS_HR[:, hrf_idx]
# scale HRF to have the same max as the glover HRF
# makes comparison easier
hrf /= (hrf.max() / 0.249007)
# Get info
trial_type, onset, duration, modulation = check_events(events)
# Pre-allocate design matrix; note: columns are alphabetically sorted
X = np.zeros((frame_times.size, np.unique(trial_type).size))
uniq_trial_types = np.unique(trial_type) # this is sorted
# Create separate regressor for each unique trial type
# Code copied from Nilearn glm module
for i, condition in enumerate(uniq_trial_types):
condition_mask = (trial_type == condition)
exp_condition = (
onset[condition_mask],
duration[condition_mask],
modulation[condition_mask]
)
# Create high resolution regressor/frame times
hr_regressor, hr_frame_times = _sample_condition(
exp_condition, frame_times, design_oversampling, 0
)
# Convolve with HRF and downsample
conv_reg = np.convolve(hr_regressor, hrf)[:hr_regressor.size]
# linear interpolation for now ...
f = interp1d(hr_frame_times, conv_reg)
X[:, i] = f(frame_times).T
# Note to self: do not scale such that max(X, axis=0) is 1, because you'll lose info
# about predictor variance!
dm = pd.DataFrame(X, columns=uniq_trial_types, index=frame_times)
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
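# Illustrative example: for a DataFrame (ndim == 2), _axify(df, [0, 1], axis=1)
# builds the tuple (slice(None), [0, 1]), i.e. "all rows, columns 0 and 1".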
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
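        # Note: concat(axis=1) keeps both frames' column labels, so the row
        # cross-section above comes back with the duplicated 'A'/'B' labels
        # rather than a de-duplicated index.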
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
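        # iloc slicing is purely positional here: s.iloc[1:2] touches only
        # position 1 (label 5).  The label-based equivalent, s.loc[4:5] in
        # test_ix_loc_setitem_consistency below, is end-inclusive and hits
        # two elements instead.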
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
        # these now work rather than raising a KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
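        # Selecting a single top-level key of the MultiIndex columns with
        # .loc[:, 10] drops that level, which is why the result matches the
        # plain df[10] lookup here.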
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
        # coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array(0, dtype=np.int64)
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = np.array([0, 0, 0, 0, 0], dtype=np.int64)
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series('foo', index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 'foo'
tm.assert_frame_equal(df, expected)
expected = DataFrame({'date': Series(1.0, index=range(5)),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
df.loc[:, 'date'] = 1.0
tm.assert_frame_equal(df, expected)
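        # As the expectations above assert, a full-column assignment through
        # a null row slice replaces the datetime64 column outright, so the
        # 'date' column takes the dtype of the assigned value (int64, object
        # or float64) rather than being coerced into datetimes.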
def test_loc_setitem_consistency_empty(self):
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
tm.assert_frame_equal(df, expected)
def test_loc_setitem_consistency_slice_column_len(self):
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])
df.loc[:, ('Respondent', 'StartDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'StartDate')])
df.loc[:, ('Respondent', 'EndDate')] = pd.to_datetime(df.loc[:, (
'Respondent', 'EndDate')])
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'EndDate')] - df.loc[:, ('Respondent', 'StartDate')]
df.loc[:, ('Respondent', 'Duration')] = df.loc[:, (
'Respondent', 'Duration')].astype('timedelta64[s]')
expected = Series([1380, 720, 840, 2160.], index=df.index,
name=('Respondent', 'Duration'))
tm.assert_series_equal(df[('Respondent', 'Duration')], expected)
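        # The expected values are simply the durations in whole seconds,
        # e.g. 10:59 -> 11:22 is 23 minutes = 1380 s; the
        # astype('timedelta64[s]') step is what converts the timedelta column
        # to that numeric form.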
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0, 0]
df.loc['a', 'A'] = 1
result = df.loc['a', 'A']
self.assertEqual(result, 1)
result = df.iloc[0, 0]
self.assertEqual(result, 1)
df.loc[:, 'B':'D'] = 0
expected = df.loc[:, 'B':'D']
with catch_warnings(record=True):
result = df.ix[:, 1:]
tm.assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3], dtype='int64')
expected = DataFrame(dict(A=Series(
[1, 2, 3], index=[4, 3, 5]))).reindex(index=[3, 5, 4])
tm.assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5, dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4, dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index=index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A=Series(val1, index=keys1), B=Series(
val2, index=keys2))).reindex(index=index)
tm.assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A': [1, 2, 3], 'B': np.nan})
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A': [1, 2, 3], 'B': np.nan})
tm.assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1: [1, 2], 2: [3, 4], 'a': ['a', 'b']})
result = df.loc[0, [1, 2]]
expected = Series([1, 3], index=[1, 2], dtype=object, name=0)
tm.assert_series_equal(result, expected)
expected = DataFrame({1: [5, 2], 2: [6, 4], 'a': ['a', 'b']})
df.loc[0, [1, 2]] = [5, 6]
tm.assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({'A': ['foo', 'bar', 'baz'],
'B': Series(
range(3), dtype=np.int64)})
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({'A': ['bar', 'baz', 'baz'],
'B': Series(
[1, 2, 2], dtype=np.int64)})
tm.assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
expected = DataFrame({'date': [Timestamp('20000101'), Timestamp(
'20000102'), Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')],
'val': Series(
[0, 1, 0, 1, 2], dtype=np.int64)})
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
columns=lrange(0, 8, 2))
result = df.iloc[2]
with catch_warnings(record=True):
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
exp = df.ix[4, 4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
        # negative indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
        # duplicate indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(np.random.randn(10, 4),
index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1, 1]
exp = df.loc['b', 'B']
self.assertEqual(result, exp)
result = df.iloc[:, 2:3]
expected = df.loc[:, ['C']]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc['j', 'D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range('20130101', periods=6)
columns = list('ABCD')
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
columns=list('aa'))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
with catch_warnings(record=True):
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23 + 0.1j, 2.2])
self.assertRaises(ValueError, f)
def f():
df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
self.assertRaises(ValueError, f)
# valid
df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,
2.2, 1.0])
result = df.loc[df.index[2:6], 'bar']
expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],
name='bar')
tm.assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1, 11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1, 4) * 1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
columns=list('ABCD'))
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A=np.arange(5, dtype='int64'),
B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(
dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
df.iloc[2:4] = [['x', 11], ['y', 13]]
expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: nan,
4: nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
    def test_loc_coercion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
expected = df.dtypes
result = df.iloc[[0]]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[[1]]
tm.assert_series_equal(result.dtypes, expected)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
expected = df.dtypes
result = df.iloc[0:2]
tm.assert_series_equal(result.dtypes, expected)
result = df.iloc[3:]
tm.assert_series_equal(result.dtypes, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.loc[0, 'c'] = 'foo'
expected = DataFrame([{"a": 1, "c": 'foo'},
{"a": 3, "b": 2, "c": np.nan}])
tm.assert_frame_equal(df, expected)
# GH10280
df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_integer_dtype(left['foo']))
self.assertTrue(is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),
columns=['foo', 'bar', 'baz'])
tm.assert_frame_equal(left, right)
self.assertTrue(is_float_dtype(left['foo']))
self.assertTrue(is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
columns=["A", "B", "C"])
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"], columns=["A", "B", "C"])
tm.assert_frame_equal(df, expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(10, 3)
df.columns = ['a', 'a', 'b']
result = df[['b', 'a']].columns
expected = Index(['b', 'a', 'a'])
self.assert_index_equal(result, expected)
# across dtypes
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])
result.columns = list('aaaaaaa')
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
# GH 3561, dups not in selected order
df = DataFrame(
{'test': [5, 7, 9, 11],
'test1': [4., 5, 6, 7],
'other': list('abcd')}, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame(
{'test': [11, 9],
'test1': [7., 6],
'other': ['d', 'c']}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ['C', 'B', 'E']
expected = DataFrame(
{'test': [11, 9, np.nan],
'test1': [7., 6, np.nan],
'other': ['d', 'c', np.nan]}, index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F', 'G', 'H', 'C', 'B', 'E']
expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],
'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],
'other': [np.nan, np.nan, np.nan,
'd', 'c', np.nan]},
index=rows)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are
# missing
df = DataFrame(randn(4, 3), index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5, 3), index=list('AABCD'))
result = dfnu.ix[['E']]
tm.assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
tm.assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame(
{'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
tm.assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(
np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat(
[df.ix[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],
index=df.index)], axis=1)
result = df.ix[:, ['A', 'B', 'C']]
tm.assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9, 2),
index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ['a', 'b']]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ['a', 'b']]
tm.assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},
'b': {1: 111, 2: 222, 3: 333}})
# this works, new column is created correctly
df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)
        # this does not work, i.e. column test is not changed
idx = df['test'] == '_'
temp = df.ix[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)
df.ix[idx, 'test'] = temp
self.assertEqual(df.iloc[0, 2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type
# df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I
# get '_'.
def test_multitype_list_index_access(self):
# GH 10610
df = pd.DataFrame(np.random.random((10, 5)),
columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(KeyError):
df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC',
18: 'nonQC',
19: 'nonQC',
20: '10',
21: '11',
22: '12',
23: '13',
24: '24',
25: '35',
26: '46',
27: '47',
28: '48',
29: '59',
30: '10'},
'QC': {17: 0.0,
18: 0.0,
19: 0.0,
20: nan,
21: nan,
22: nan,
23: nan,
24: 1.0,
25: nan,
26: nan,
27: nan,
28: nan,
29: nan,
30: nan},
'data': {17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006},
'year': {17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986}}).reset_index()
result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(
columns=df.columns)
tm.assert_frame_equal(result, df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]})
result = df.set_index(['a', 'b'], drop=False)
expected = DataFrame({"a": ['R1', 'R2', np.nan, 'R4'],
'b': ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20]},
index=[Index(['R1', 'R2', np.nan, 'R4'],
name='a'),
Index(['C1', 'C2', 'C3', 'C4'], name='b')])
tm.assert_frame_equal(result, expected)
def test_multi_assign(self):
        # GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': lrange(6),
'col2': lrange(6, 12)})
df.ix[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isnull()
cols = ['col1', 'col2']
dft = df2 * 2
dft.ix[3, 3] = np.nan
expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],
'PF': [0, 0, 0, 0, 1, 1],
'col1': Series([0, 1, 4, 6, 8, 10]),
'col2': [12, 7, 16, np.nan, 20, 22]})
# frame on rhs
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
df2.ix[mask, cols] = dft.ix[mask, cols].values
tm.assert_frame_equal(df2, expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[
0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A', 'B']:
expected.loc[mask, col] = df['D']
df.loc[df['A'] == 0, ['A', 'B']] = df['D']
tm.assert_frame_equal(df, expected)
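        # i.e. assigning the single Series df['D'] to two target columns
        # through a boolean row mask broadcasts the same values into both
        # 'A' and 'B', which is what the expected-building loop mimics.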
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.ix[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.ix[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
        # if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = pd.DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].ix[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
        # ix/loc get/set not consistent when
        # a mixed int/string index is used
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = [1, 2, 3]
df.ix[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = TO(2)
result = DataFrame(index=[0, 1], columns=[0])
result.ix[1, 0] = TO(2)
tm.assert_frame_equal(result, df)
# remains object dtype even after setting it back
df = DataFrame(index=[0, 1], columns=[0])
df.ix[1, 0] = TO(1)
df.ix[1, 0] = np.nan
result = DataFrame(index=[0, 1], columns=[0])
tm.assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with warnings.catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
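        # Rough reading of the expected table: a plain boolean ndarray works
        # with [], .loc and .iloc alike; a boolean Series keyed by the
        # frame's (reversed) index aligns for [] and .loc but is rejected by
        # .iloc; and a Series keyed by an unrelated index ('locs') cannot be
        # aligned at all, so every accessor raises.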
def test_ix_slicing_strings(self):
# GH3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_non_unique_loc(self):
# GH3659
# non-unique indexer with loc slice
# https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
        # these are going to raise because we are non-monotonic
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3])
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(1, None)]))
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([slice(0, None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)]))
# monotonic are ok
df = DataFrame({'A': [1, 2, 3, 4, 5, 6],
'B': [3, 4, 5, 6, 7, 8]},
index=[0, 1, 0, 1, 2, 3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A': [2, 4, 5, 6], 'B': [4, 6, 7, 8]},
index=[1, 1, 2, 3])
tm.assert_frame_equal(result, expected)
result = df.loc[0:]
tm.assert_frame_equal(result, df)
result = df.loc[1:2]
expected = DataFrame({'A': [2, 4, 5], 'B': [4, 6, 7]},
index=[1, 1, 2])
tm.assert_frame_equal(result, expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
df2 = pd.concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
])
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
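        # The loop above rebuilds what .loc is expected to return: for each
        # surviving label, all three duplicated rows (the 1x, 2x and 3x
        # copies) come back, plus NaN rows for the requested labels that are
        # missing from the index.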
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = pd.DataFrame([1], pd.Index([pd.Timestamp('2011-01-01')],
dtype=object))
self.assertTrue(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
df = pd.DataFrame()
self.assertFalse(df.index.is_all_dates)
with tm.assertRaises(KeyError):
df['2011']
with tm.assertRaises(KeyError):
df.loc['2011', 0]
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1', 'h3', 'h5'])
columns = MultiIndex.from_tuples([('A', 'A1')], names=['main', 'sub'])
expected = DataFrame([['a', 1, 1]], index=columns, columns=index).T
result = df2.loc[:, ('A', 'A1')]
tm.assert_frame_equal(result, expected)
result = df2[('A', 'A1')]
tm.assert_frame_equal(result, expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens
# with a non-unique)
expected = Series(['a', 1, 1], index=['h1', 'h3', 'h5'], name='A1')
result = df2['A']['A1']
tm.assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d', 4, 4], ['e', 5, 5]],
index=Index(['B2', 'B2'], name='sub'),
columns=['h1', 'h3', 'h5'], ).T
result = df2['A']['B2']
tm.assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l, l2):
return pd.concat([DataFrame(randn(l, len(columns)),
index=lrange(l), columns=columns),
DataFrame(np.ones((l2, len(columns))),
index=[0] * l2, columns=columns)])
def gen_expected(df, mask):
l = len(mask)
return pd.concat([df.take([0], convert=False),
DataFrame(np.ones((l, len(columns))),
index=[0] * l,
columns=columns),
df.take(mask[1:], convert=False)])
df = gen_test(900, 100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
df = gen_test(900000, 100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df, mask)
tm.assert_frame_equal(result, expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)
expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ['B', 'C']] = df.loc[:, ['B', 'C']].astype(np.int64)
expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],
columns=list('ABCDEFG'))
tm.assert_frame_equal(df, expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
cols = pd.MultiIndex.from_tuples([('A', '1'), ('B', '1'), ('A', '2')])
df = DataFrame(np.arange(3).reshape((1, 3)),
columns=cols, dtype=object)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
self.assert_index_equal(df.index, index)
# TODO(wesm): unused variables
# result = df.get_dtype_counts().sort_index()
# expected = Series({'float64': 2, 'object': 1}).sort_index()
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a', 'a', 'a', 'a', 'a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a', 'a', 'a', 'a', 'a'], name=1)
result = df.iloc[0]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
# series
s_orig = Series([1, 2, 3])
s = s_orig.copy()
s[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1, 2, 3, 5.], index=[0, 1, 2, 5])
tm.assert_series_equal(s, expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
# ## frame ##
df_orig = DataFrame(
np.arange(6).reshape(3, 2), columns=['A', 'B'], dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4, 2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4, 2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
tm.assert_frame_equal(df, expected)
expected = DataFrame(dict({'A': [0, 4, 4], 'B': [1, 5, 5]}))
df = df_orig.copy()
df.loc[1] = df.loc[2]
tm.assert_frame_equal(df, expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({'A': [0, 2, 4, 4], 'B': [1, 3, 5, 5]}))
df = df_orig.copy()
df.loc[3] = df.loc[2]
tm.assert_frame_equal(df, expected)
# single dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': [0, 2, 4]}))
df = df_orig.copy()
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({'A': [0, 2, 4], 'B': Series([0, 2, 4])}))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:, 'B'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:, 'C'] = df.ix[:, 'A']
tm.assert_frame_equal(df, expected)
# ## panel ##
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2, 4, 2),
items=['Item1', 'Item2'],
major_axis=pd.date_range('2001/1/12', periods=4),
minor_axis=['A', 'B'], dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
tm.assert_panel_equal(p, expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2, 1, 0)
expected['C'] = DataFrame({'Item1': [30, 30, 30, 30],
'Item2': [32, 32, 32, 32]},
index=p_orig.major_axis)
expected = expected.transpose(2, 1, 0)
p = p_orig.copy()
p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items)
tm.assert_panel_equal(p, expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates,
columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig, DataFrame(
{'A': 7}, index=[dates[-1] + 1])])
df = df_orig.copy()
df.loc[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 'A'] = 7
tm.assert_frame_equal(df, expected)
exp_other = DataFrame({0: 7}, index=[dates[-1] + 1])
expected = pd.concat([df_orig, exp_other], axis=1)
df = df_orig.copy()
df.loc[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.at[dates[-1] + 1, 0] = 7
tm.assert_frame_equal(df, expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
tm.assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=range(4))
tm.assert_frame_equal(df, DataFrame(columns=['A', 'B'], index=[0]))
# columns will align
df = DataFrame(columns=['A', 'B'])
df.loc[0] = Series(1, index=['B'])
exp = DataFrame([[np.nan, 1]], columns=['A', 'B'],
index=[0], dtype='float64')
tm.assert_frame_equal(df, exp)
# list-like must conform
df = DataFrame(columns=['A', 'B'])
def f():
df.loc[0] = [1, 2, 3]
self.assertRaises(ValueError, f)
        # these are coerced to float unavoidably (as it's a list-like to
        # begin with)
df = DataFrame(columns=['A', 'B'])
df.loc[3] = [6, 7]
exp = DataFrame([[6, 7]], index=[3], columns=['A', 'B'],
dtype='float64')
tm.assert_frame_equal(df, exp)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[4, 5, 6, 7]).loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4],
index=[1, 2, 3, 4]).loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_series_partial_set_with_name(self):
# GH 11497
idx = Index([1, 2], dtype='int64', name='idx')
ser = Series([0.1, 0.2], index=idx, name='s')
# loc
exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[3, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx,
name='s')
result = ser.loc[[3, 2, 3, 'x']]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
result = ser.loc[[2, 2, 'x', 1]]
tm.assert_series_equal(result, expected, check_index_type=True)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda: ser.loc[[3, 3, 3]])
exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
result = ser.loc[[2, 2, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 3, 3]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[5, 4, 4]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[7, 2, 2]]
tm.assert_series_equal(result, expected, check_index_type=True)
exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
result = Series([0.1, 0.2, 0.3, 0.4], index=idx,
name='s').loc[[4, 5, 5]]
tm.assert_series_equal(result, expected, check_index_type=True)
# iloc
exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
result = ser.iloc[[1, 1, 0, 0]]
tm.assert_series_equal(result, expected, check_index_type=True)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
orig = tm.makeTimeDataFrame()
df = orig.copy()
# don't allow non-string inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100, :] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df = orig.copy()
df.loc['a', :] = df.ix[0]
exp = orig.append(pd.Series(df.ix[0], name='a'))
tm.assert_frame_equal(df, exp)
tm.assert_index_equal(df.index,
pd.Index(orig.index.tolist() + ['a']))
self.assertEqual(df.index.dtype, 'object')
def test_partial_set_empty_series(self):
# GH5226
# partially set with an empty object series
s = Series()
s.loc[1] = 1
tm.assert_series_equal(s, Series([1], index=[1]))
s.loc[3] = 3
tm.assert_series_equal(s, Series([1, 3], index=[1, 3]))
s = Series()
s.loc[1] = 1.
tm.assert_series_equal(s, Series([1.], index=[1]))
s.loc[3] = 3.
tm.assert_series_equal(s, Series([1., 3.], index=[1, 3]))
s = Series()
s.loc['foo'] = 1
tm.assert_series_equal(s, Series([1], index=['foo']))
s.loc['bar'] = 3
tm.assert_series_equal(s, Series([1, 3], index=['foo', 'bar']))
s.loc[3] = 4
tm.assert_series_equal(s, Series([1, 3, 4], index=['foo', 'bar', 3]))
def test_partial_set_empty_frame(self):
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1], index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:, 1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index(
[], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
tm.assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
tm.assert_frame_equal(f(), expected)
def f():
df = DataFrame()
tm.assert_index_equal(df.index, pd.Index([], dtype='object'))
df['foo'] = range(len(df))
return df
expected = DataFrame(columns=['foo'],
index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
tm.assert_frame_equal(f(), expected)
df = DataFrame()
tm.assert_index_equal(df.columns, pd.Index([], dtype=object))
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:, 1] = Series([1], index=['foo'])
tm.assert_frame_equal(df, DataFrame([[1]], index=['foo'], columns=[1]))
tm.assert_frame_equal(df, df2)
# no index to start
expected = DataFrame({0: Series(1, index=range(4))},
columns=['A', 'B', 0])
df = DataFrame(columns=['A', 'B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['A', 'B'])
df.loc[:, 0] = Series(1, index=range(4))
df.dtypes
str(df)
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_row(self):
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
tm.assert_frame_equal(y, expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
tm.assert_frame_equal(df, expected)
tm.assert_series_equal(df['c c'], Series(name='c c', dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A', 'B', 'C'])
expected = DataFrame(columns=['A', 'B', 'C'],
index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
tm.assert_frame_equal(result, expected)
def test_partial_set_empty_frame_set_series(self):
# GH 5756
# setting with empty Series
df = DataFrame(Series())
tm.assert_frame_equal(df, DataFrame({0: Series()}))
df = DataFrame(Series(name='foo'))
tm.assert_frame_equal(df, DataFrame({'foo': Series()}))
def test_partial_set_empty_frame_empty_copy_assignment(self):
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0, index=[0], columns=['a'])
tm.assert_frame_equal(df, expected)
def test_partial_set_empty_frame_empty_consistencies(self):
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x=[1, 2], y=[np.nan, np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(
dict(x=['1', '2'], y=[np.nan, np.nan]), dtype=object)
tm.assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x=[1], y=[np.nan]))
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0, 0), 'z'] = 2
result = df.loc[(0, 0), 'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e'], index=range(5))
df['f'] = 0
df.f.values[3] = 1
# TODO(wesm): unused?
# y = df.iloc[np.arange(2, len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5, 6), dtype='int64'), columns=[
'a', 'b', 'c', 'd', 'e', 'f'], index=range(5))
expected.at[3, 'f'] = 2
tm.assert_frame_equal(df, expected)
expected = Series([0, 0, 0, 2, 0], name='f')
tm.assert_series_equal(df.f, expected)
def test_set_ix_out_of_bounds_axis_0(self):
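# setting a scalar via .ix at a row position beyond the frame's length should raise ValueError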
df = pd.DataFrame(
randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
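# setting a scalar via .ix at a column position beyond the frame's width should raise ValueError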
df = pd.DataFrame(
randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_index_type_coercion(self):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but is actually, not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)),
Series(range(5), index=range(1, 6))]:
self.assertTrue(s.index.is_integer())
for indexer in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
indexer(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(indexer(s2)[0.1] == 0)
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
for s in [Series(range(5), index=np.arange(5.))]:
self.assertTrue(s.index.is_floating())
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
s2 = s.copy()
idxr(s2)[0.1] = 0
self.assertTrue(s2.index.is_floating())
self.assertTrue(idxr(s2)[0.1] == 0)
s2 = s.copy()
idxr(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
idxr(s2)['0'] = 0
self.assertTrue(s2.index.is_object())
def test_float_index_to_mixed(self):
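# adding a string-labeled column to a frame with float column labels
# should leave the existing float-labeled columns untouched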
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
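# selecting a duplicated float label should return a Series, and .ix should match .loc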
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
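# non-scalar .loc assignment on a float index: a partial label selection
# sets only those rows, and a full self-assignment is a no-op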
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_float_index_at_iat(self):
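# .at should accept the float labels while .iat keeps working positionally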
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH 8258: tests that both rows & columns are aligned to what is
# being assigned; covers both uniform-dtype & mixed-dtype cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
tm.assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
tm.assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right-hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
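# label-based slicing with a negative step should select the same
# elements as the equivalent positional slice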
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer():
# For integer indices, ix and plain getitem are position-based.
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
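# assigning a dict to an existing row label via .loc should align the values by column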
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})
df2 = df.ix[[], :]
self.assertEqual(df2.loc[:, 'a'].dtype, np.int64)
tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])
tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
s.loc[range(2)] = 43
tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))
def test_non_reducing_slice(self):
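# every selector passed through _non_reducing_slice should yield a 2-d (DataFrame) selection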
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
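# with no explicit slice, _maybe_numeric_slice should default to the numeric
# columns (optionally including bools); an explicit slice is passed through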
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ['A', 'C']]
result = _maybe_numeric_slice(df, [1])
expected = [1]
self.assertEqual(result, expected)
class TestSeriesNoneCoercion(tm.TestCase):
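# setting None into a Series should coerce to NaN for numeric dtypes,
# NaT for datetimes, and remain None for object dtype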
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
tm.assert_series_equal(start_series, expected_series)
class TestDataframeNoneCoercion(tm.TestCase):
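# the same None-coercion rules should hold when setting a single DataFrame cell to None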
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
tm.assert_frame_equal(start_dataframe, expected_dataframe)